desh2608 committed on
Commit
3774ffb
1 Parent(s): ab53346

add modified transducer

Files changed (40)
  1. README.md +37 -4
  2. decoding_results/regular_transducer/beam_search/errs-dev-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt +0 -0
  3. decoding_results/regular_transducer/beam_search/errs-test-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt +0 -0
  4. decoding_results/regular_transducer/beam_search/log-decode-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model-2023-06-15-16-05-54 +26 -0
  5. decoding_results/regular_transducer/beam_search/recogs-dev-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt +0 -0
  6. decoding_results/regular_transducer/beam_search/recogs-test-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt +0 -0
  7. decoding_results/regular_transducer/beam_search/wer-summary-dev-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt +2 -0
  8. decoding_results/regular_transducer/beam_search/wer-summary-test-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt +2 -0
  9. decoding_results/regular_transducer/fast_beam_search/errs-dev-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt +0 -0
  10. decoding_results/regular_transducer/fast_beam_search/errs-test-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt +0 -0
  11. decoding_results/regular_transducer/fast_beam_search/log-decode-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model-2023-06-15-16-02-29 +26 -0
  12. decoding_results/regular_transducer/fast_beam_search/recogs-dev-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt +0 -0
  13. decoding_results/regular_transducer/fast_beam_search/recogs-test-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt +0 -0
  14. decoding_results/regular_transducer/fast_beam_search/wer-summary-dev-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt +2 -0
  15. decoding_results/regular_transducer/fast_beam_search/wer-summary-test-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt +2 -0
  16. decoding_results/regular_transducer/greedy_search/errs-dev-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt +0 -0
  17. decoding_results/regular_transducer/greedy_search/errs-test-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt +0 -0
  18. decoding_results/regular_transducer/greedy_search/log-decode-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model-2023-06-15-15-55-01 +26 -0
  19. decoding_results/regular_transducer/greedy_search/recogs-dev-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt +0 -0
  20. decoding_results/regular_transducer/greedy_search/recogs-test-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt +0 -0
  21. decoding_results/regular_transducer/greedy_search/wer-summary-dev-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt +2 -0
  22. decoding_results/regular_transducer/greedy_search/wer-summary-test-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt +2 -0
  23. decoding_results/regular_transducer/modified_beam_search/errs-dev-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt +0 -0
  24. decoding_results/regular_transducer/modified_beam_search/errs-test-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt +0 -0
  25. decoding_results/regular_transducer/modified_beam_search/log-decode-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model-2023-06-15-15-56-46 +8 -0
  26. decoding_results/regular_transducer/modified_beam_search/log-decode-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model-2023-06-15-15-58-01 +27 -0
  27. decoding_results/regular_transducer/modified_beam_search/recogs-dev-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt +0 -0
  28. decoding_results/regular_transducer/modified_beam_search/recogs-test-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt +0 -0
  29. decoding_results/regular_transducer/modified_beam_search/wer-summary-dev-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt +2 -0
  30. decoding_results/regular_transducer/modified_beam_search/wer-summary-test-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt +2 -0
  31. exp/regular_transducer/decode.sh +12 -0
  32. exp/regular_transducer/export.sh +6 -0
  33. exp/regular_transducer/jit_script.pt +3 -0
  34. exp/regular_transducer/log/log-train-2023-06-15-01-56-32-0 +0 -0
  35. exp/regular_transducer/log/log-train-2023-06-15-01-56-32-1 +0 -0
  36. exp/regular_transducer/log/log-train-2023-06-15-01-56-32-2 +0 -0
  37. exp/regular_transducer/log/log-train-2023-06-15-01-56-32-3 +0 -0
  38. exp/regular_transducer/pretrained.pt +3 -0
  39. exp/regular_transducer/tensorboard/events.out.tfevents.1686808592.r2n01.160747.0 +3 -0
  40. exp/regular_transducer/train.sh +10 -0
README.md CHANGED
@@ -9,15 +9,16 @@ metrics:
---
### TedLium3 Zipformer

- Using the codes from this PR https://github.com/k2-fsa/icefall/pull/1125.
+ **`rnnt_type=regular`**

The WERs are

|                                    | dev        | test       | comment                                   |
|------------------------------------|------------|------------|------------------------------------------|
- | greedy search                      | 6.32       | 5.83       | --epoch 50, --avg 22, --max-duration 500 |
- | modified beam search (beam size 4) | 6.16       | 5.79       | --epoch 50, --avg 22, --max-duration 500 |
- | fast beam search (set as default)  | 6.30       | 5.89       | --epoch 50, --avg 22, --max-duration 500 |
+ | greedy search                      | 6.74       | 6.16       | --epoch 50, --avg 22, --max-duration 500 |
+ | beam search (beam size 4)          | 6.56       | 5.95       | --epoch 50, --avg 22, --max-duration 500 |
+ | modified beam search (beam size 4) | 6.54       | 6.00       | --epoch 50, --avg 22, --max-duration 500 |
+ | fast beam search (set as default)  | 6.91       | 6.28       | --epoch 50, --avg 22, --max-duration 500 |

The training command for reproducing is given below:

@@ -81,3 +82,35 @@ avg=22
--max-contexts 4 \
--max-states 8
```
+
+ **`rnnt_type=modified`**
+
+ Using the codes from this PR https://github.com/k2-fsa/icefall/pull/1125.
+
+ The WERs are
+
+ |                                    | dev        | test       | comment                                   |
+ |------------------------------------|------------|------------|------------------------------------------|
+ | greedy search                      | 6.32       | 5.83       | --epoch 50, --avg 22, --max-duration 500 |
+ | modified beam search (beam size 4) | 6.16       | 5.79       | --epoch 50, --avg 22, --max-duration 500 |
+ | fast beam search (set as default)  | 6.30       | 5.89       | --epoch 50, --avg 22, --max-duration 500 |
+
+ The training command for reproducing is given below:
+
+ ```
+ export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+ ./zipformer/train.py \
+   --use-fp16 true \
+   --world-size 4 \
+   --num-epochs 50 \
+   --start-epoch 0 \
+   --exp-dir zipformer/exp \
+   --max-duration 1000 \
+   --rnnt-type modified
+ ```
+
+ The tensorboard training log can be found at
+ https://tensorboard.dev/experiment/AKXbJha0S9aXyfmuvG4h5A/#scalars
+
+ The decoding commands are the same as above.
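The `rnnt_type` switch above refers to the transducer topology: in the regular RNN-T lattice a single acoustic frame may emit any number of non-blank symbols, whereas the modified topology added in the referenced PR allows at most one non-blank symbol per frame. The toy dynamic program below is only a hypothetical illustration of that difference (it is not the k2/icefall implementation; the function name, tensor layout, and termination convention are assumptions):

```
# Hypothetical sketch (not the k2/icefall code): forward score of one label
# sequence under the regular vs. modified transducer topologies.
import torch

def transducer_logprob(log_probs: torch.Tensor, ys: list, blank: int = 0,
                       modified: bool = False) -> torch.Tensor:
    # log_probs: (T, U+1, V) joiner log-probabilities for one utterance.
    T, U_plus_1, _ = log_probs.shape
    U = len(ys)
    assert U_plus_1 == U + 1
    alpha = torch.full((T + 1, U + 1), float("-inf"))
    alpha[0, 0] = 0.0  # no frames consumed, no labels emitted yet
    for t in range(T + 1):
        for u in range(U + 1):
            if t == 0 and u == 0:
                continue
            cands = []
            if t > 0:
                # blank transition: consume frame t-1, keep the label position
                cands.append(alpha[t - 1, u] + log_probs[t - 1, u, blank])
            if u > 0:
                if modified:
                    # modified topology: emitting a label also consumes a frame,
                    # so each frame produces at most one non-blank symbol
                    if t > 0:
                        cands.append(alpha[t - 1, u - 1] + log_probs[t - 1, u - 1, ys[u - 1]])
                else:
                    # regular topology: emit the label without advancing time
                    if t < T:
                        cands.append(alpha[t, u - 1] + log_probs[t, u - 1, ys[u - 1]])
            if cands:
                alpha[t, u] = torch.logsumexp(torch.stack(cands), dim=0)
    return alpha[T, U]  # log P(ys | x): all frames consumed, all labels emitted
```

For instance, with T = 1 frame and U = 2 labels the regular recursion still returns a finite score (both labels emitted on the single frame), while the modified one returns -inf, which is why the modified loss requires T >= U.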
decoding_results/regular_transducer/beam_search/errs-dev-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/beam_search/errs-test-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/beam_search/log-decode-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model-2023-06-15-16-05-54 ADDED
@@ -0,0 +1,26 @@
+ 2023-06-15 16:05:54,523 INFO [decode.py:675] Decoding started
+ 2023-06-15 16:05:54,524 INFO [decode.py:681] Device: cuda:0
+ 2023-06-15 16:05:54,531 INFO [decode.py:691] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '38211604d6a24b15f320578a1a38f6c12d7a711c', 'k2-git-date': 'Mon Jun 12 10:59:44 2023', 'lhotse-version': '1.15.0.dev+git.f1fd23d.clean', 'torch-version': '2.0.0+cu117', 'torch-cuda-available': True, 'torch-cuda-version': '11.7', 'python-version': '3.8', 'icefall-git-branch': 'ted/zipformer', 'icefall-git-sha1': '323a299-dirty', 'icefall-git-date': 'Tue Jun 13 04:47:15 2023', 'icefall-path': '/exp/draj/jsalt2023/icefall', 'k2-path': '/exp/draj/jsalt2023/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/jsalt2023/lhotse/lhotse/__init__.py', 'hostname': 'r2n02', 'IP address': '10.1.2.2'}, 'epoch': 50, 'iter': 0, 'avg': 22, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp/v5'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'decoding_method': 'beam_search', 'beam_size': 4, 'beam': 20.0, 'ngram_lm_scale': 0.01, 'max_contexts': 8, 'max_states': 64, 'context_size': 2, 'max_sym_per_frame': 1, 'num_paths': 200, 'nbest_scale': 0.5, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'res_dir': PosixPath('zipformer/exp/v5/beam_search'), 'suffix': 'epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500}
+ 2023-06-15 16:05:54,532 INFO [decode.py:693] About to create model
+ 2023-06-15 16:05:55,243 INFO [decode.py:760] Calculating the averaged model over epoch range from 28 (excluded) to 50
+ 2023-06-15 16:05:59,616 INFO [decode.py:794] Number of model parameters: 65549011
+ 2023-06-15 16:05:59,617 INFO [asr_datamodule.py:361] About to get dev cuts
+ 2023-06-15 16:05:59,620 INFO [asr_datamodule.py:366] About to get test cuts
+ 2023-06-15 16:07:14,020 INFO [decode.py:572] batch 0/?, cuts processed until now is 30
+ 2023-06-15 16:21:11,947 INFO [decode.py:588] The transcripts are stored in zipformer/exp/v5/beam_search/recogs-dev-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt
+ 2023-06-15 16:21:11,979 INFO [utils.py:562] [dev-beam_size_4] %WER 6.56% [1195 / 18226, 187 ins, 400 del, 608 sub ]
+ 2023-06-15 16:21:12,046 INFO [decode.py:601] Wrote detailed error stats to zipformer/exp/v5/beam_search/errs-dev-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt
+ 2023-06-15 16:21:12,047 INFO [decode.py:617]
+ For dev, WER of different settings are:
+ beam_size_4 6.56 best for dev
+
+ 2023-06-15 16:22:30,203 INFO [decode.py:572] batch 0/?, cuts processed until now is 40
+ 2023-06-15 16:42:42,018 INFO [decode.py:572] batch 20/?, cuts processed until now is 1063
+ 2023-06-15 16:46:02,358 INFO [decode.py:588] The transcripts are stored in zipformer/exp/v5/beam_search/recogs-test-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt
+ 2023-06-15 16:46:02,413 INFO [utils.py:562] [test-beam_size_4] %WER 5.95% [1693 / 28430, 212 ins, 683 del, 798 sub ]
+ 2023-06-15 16:46:02,509 INFO [decode.py:601] Wrote detailed error stats to zipformer/exp/v5/beam_search/errs-test-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt
+ 2023-06-15 16:46:02,511 INFO [decode.py:617]
+ For test, WER of different settings are:
+ beam_size_4 5.95 best for test
+
+ 2023-06-15 16:46:02,511 INFO [decode.py:825] Done!
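The %WER figures in these logs follow the usual edit-distance definition, so they can be sanity-checked directly from the bracketed counts. The helper below is ours; the numbers are copied from the log above:

```
# Recompute the WER reported above from its error counts, e.g.
# "%WER 6.56% [1195 / 18226, 187 ins, 400 del, 608 sub]"
def wer(ins: int, dels: int, subs: int, ref_words: int) -> float:
    """Word error rate in percent: (insertions + deletions + substitutions) / reference words."""
    return 100.0 * (ins + dels + subs) / ref_words

print(round(wer(187, 400, 608, 18226), 2))  # dev:  6.56
print(round(wer(212, 683, 798, 28430), 2))  # test: 5.95
```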
decoding_results/regular_transducer/beam_search/recogs-dev-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/beam_search/recogs-test-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/beam_search/wer-summary-dev-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ beam_size_4 6.56
decoding_results/regular_transducer/beam_search/wer-summary-test-beam_size_4-epoch-50-avg-22-beam_search-beam-size-4-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ beam_size_4 5.95
decoding_results/regular_transducer/fast_beam_search/errs-dev-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/fast_beam_search/errs-test-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/fast_beam_search/log-decode-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model-2023-06-15-16-02-29 ADDED
@@ -0,0 +1,26 @@
+ 2023-06-15 16:02:29,961 INFO [decode.py:675] Decoding started
+ 2023-06-15 16:02:29,962 INFO [decode.py:681] Device: cuda:0
+ 2023-06-15 16:02:29,970 INFO [decode.py:691] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '38211604d6a24b15f320578a1a38f6c12d7a711c', 'k2-git-date': 'Mon Jun 12 10:59:44 2023', 'lhotse-version': '1.15.0.dev+git.f1fd23d.clean', 'torch-version': '2.0.0+cu117', 'torch-cuda-available': True, 'torch-cuda-version': '11.7', 'python-version': '3.8', 'icefall-git-branch': 'ted/zipformer', 'icefall-git-sha1': '323a299-dirty', 'icefall-git-date': 'Tue Jun 13 04:47:15 2023', 'icefall-path': '/exp/draj/jsalt2023/icefall', 'k2-path': '/exp/draj/jsalt2023/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/jsalt2023/lhotse/lhotse/__init__.py', 'hostname': 'r2n02', 'IP address': '10.1.2.2'}, 'epoch': 50, 'iter': 0, 'avg': 22, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp/v5'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'decoding_method': 'fast_beam_search', 'beam_size': 4, 'beam': 20.0, 'ngram_lm_scale': 0.01, 'max_contexts': 8, 'max_states': 64, 'context_size': 2, 'max_sym_per_frame': 1, 'num_paths': 200, 'nbest_scale': 0.5, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'res_dir': PosixPath('zipformer/exp/v5/fast_beam_search'), 'suffix': 'epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500}
+ 2023-06-15 16:02:29,970 INFO [decode.py:693] About to create model
+ 2023-06-15 16:02:30,674 INFO [decode.py:760] Calculating the averaged model over epoch range from 28 (excluded) to 50
+ 2023-06-15 16:02:50,856 INFO [decode.py:794] Number of model parameters: 65549011
+ 2023-06-15 16:02:50,857 INFO [asr_datamodule.py:361] About to get dev cuts
+ 2023-06-15 16:02:50,860 INFO [asr_datamodule.py:366] About to get test cuts
+ 2023-06-15 16:02:56,598 INFO [decode.py:572] batch 0/?, cuts processed until now is 30
+ 2023-06-15 16:03:41,470 INFO [decode.py:588] The transcripts are stored in zipformer/exp/v5/fast_beam_search/recogs-dev-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt
+ 2023-06-15 16:03:41,539 INFO [utils.py:562] [dev-beam_20.0_max_contexts_8_max_states_64] %WER 6.91% [1260 / 18226, 182 ins, 467 del, 611 sub ]
+ 2023-06-15 16:03:41,601 INFO [decode.py:601] Wrote detailed error stats to zipformer/exp/v5/fast_beam_search/errs-dev-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt
+ 2023-06-15 16:03:41,602 INFO [decode.py:617]
+ For dev, WER of different settings are:
+ beam_20.0_max_contexts_8_max_states_64 6.91 best for dev
+
+ 2023-06-15 16:03:44,090 INFO [decode.py:572] batch 0/?, cuts processed until now is 40
+ 2023-06-15 16:04:22,021 INFO [decode.py:572] batch 20/?, cuts processed until now is 1063
+ 2023-06-15 16:04:34,908 INFO [decode.py:588] The transcripts are stored in zipformer/exp/v5/fast_beam_search/recogs-test-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt
+ 2023-06-15 16:04:34,954 INFO [utils.py:562] [test-beam_20.0_max_contexts_8_max_states_64] %WER 6.28% [1785 / 28430, 186 ins, 788 del, 811 sub ]
+ 2023-06-15 16:04:35,049 INFO [decode.py:601] Wrote detailed error stats to zipformer/exp/v5/fast_beam_search/errs-test-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt
+ 2023-06-15 16:04:35,050 INFO [decode.py:617]
+ For test, WER of different settings are:
+ beam_20.0_max_contexts_8_max_states_64 6.28 best for test
+
+ 2023-06-15 16:04:35,050 INFO [decode.py:825] Done!
decoding_results/regular_transducer/fast_beam_search/recogs-dev-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/fast_beam_search/recogs-test-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/fast_beam_search/wer-summary-dev-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ beam_20.0_max_contexts_8_max_states_64 6.91
decoding_results/regular_transducer/fast_beam_search/wer-summary-test-beam_20.0_max_contexts_8_max_states_64-epoch-50-avg-22-beam-20.0-max-contexts-8-max-states-64-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ beam_20.0_max_contexts_8_max_states_64 6.28
decoding_results/regular_transducer/greedy_search/errs-dev-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/greedy_search/errs-test-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/greedy_search/log-decode-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model-2023-06-15-15-55-01 ADDED
@@ -0,0 +1,26 @@
+ 2023-06-15 15:55:01,021 INFO [decode.py:675] Decoding started
+ 2023-06-15 15:55:01,023 INFO [decode.py:681] Device: cuda:0
+ 2023-06-15 15:55:01,031 INFO [decode.py:691] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '38211604d6a24b15f320578a1a38f6c12d7a711c', 'k2-git-date': 'Mon Jun 12 10:59:44 2023', 'lhotse-version': '1.15.0.dev+git.f1fd23d.clean', 'torch-version': '2.0.0+cu117', 'torch-cuda-available': True, 'torch-cuda-version': '11.7', 'python-version': '3.8', 'icefall-git-branch': 'ted/zipformer', 'icefall-git-sha1': '323a299-dirty', 'icefall-git-date': 'Tue Jun 13 04:47:15 2023', 'icefall-path': '/exp/draj/jsalt2023/icefall', 'k2-path': '/exp/draj/jsalt2023/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/jsalt2023/lhotse/lhotse/__init__.py', 'hostname': 'r2n04', 'IP address': '10.1.2.4'}, 'epoch': 50, 'iter': 0, 'avg': 22, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp/v5'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'decoding_method': 'greedy_search', 'beam_size': 4, 'beam': 20.0, 'ngram_lm_scale': 0.01, 'max_contexts': 8, 'max_states': 64, 'context_size': 2, 'max_sym_per_frame': 1, 'num_paths': 200, 'nbest_scale': 0.5, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'res_dir': PosixPath('zipformer/exp/v5/greedy_search'), 'suffix': 'epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500}
+ 2023-06-15 15:55:01,031 INFO [decode.py:693] About to create model
+ 2023-06-15 15:55:01,633 INFO [decode.py:760] Calculating the averaged model over epoch range from 28 (excluded) to 50
+ 2023-06-15 15:55:05,547 INFO [decode.py:794] Number of model parameters: 65549011
+ 2023-06-15 15:55:05,547 INFO [asr_datamodule.py:361] About to get dev cuts
+ 2023-06-15 15:55:05,550 INFO [asr_datamodule.py:366] About to get test cuts
+ 2023-06-15 15:55:08,961 INFO [decode.py:572] batch 0/?, cuts processed until now is 30
+ 2023-06-15 15:55:22,921 INFO [zipformer.py:1728] name=None, attn_weights_entropy = tensor([3.8087, 3.2133, 3.5770, 3.0500], device='cuda:0')
+ 2023-06-15 15:55:27,056 INFO [decode.py:588] The transcripts are stored in zipformer/exp/v5/greedy_search/recogs-dev-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt
+ 2023-06-15 15:55:27,088 INFO [utils.py:562] [dev-greedy_search] %WER 6.74% [1228 / 18226, 178 ins, 443 del, 607 sub ]
+ 2023-06-15 15:55:27,156 INFO [decode.py:601] Wrote detailed error stats to zipformer/exp/v5/greedy_search/errs-dev-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt
+ 2023-06-15 15:55:27,158 INFO [decode.py:617]
+ For dev, WER of different settings are:
+ greedy_search 6.74 best for dev
+
+ 2023-06-15 15:55:28,580 INFO [decode.py:572] batch 0/?, cuts processed until now is 40
+ 2023-06-15 15:55:49,101 INFO [decode.py:588] The transcripts are stored in zipformer/exp/v5/greedy_search/recogs-test-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt
+ 2023-06-15 15:55:49,150 INFO [utils.py:562] [test-greedy_search] %WER 6.16% [1751 / 28430, 197 ins, 725 del, 829 sub ]
+ 2023-06-15 15:55:49,248 INFO [decode.py:601] Wrote detailed error stats to zipformer/exp/v5/greedy_search/errs-test-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt
+ 2023-06-15 15:55:49,290 INFO [decode.py:617]
+ For test, WER of different settings are:
+ greedy_search 6.16 best for test
+
+ 2023-06-15 15:55:49,290 INFO [decode.py:825] Done!
decoding_results/regular_transducer/greedy_search/recogs-dev-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/greedy_search/recogs-test-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/greedy_search/wer-summary-dev-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ greedy_search 6.74
decoding_results/regular_transducer/greedy_search/wer-summary-test-greedy_search-epoch-50-avg-22-context-2-max-sym-per-frame-1-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ greedy_search 6.16
decoding_results/regular_transducer/modified_beam_search/errs-dev-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/modified_beam_search/errs-test-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/modified_beam_search/log-decode-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model-2023-06-15-15-56-46 ADDED
@@ -0,0 +1,8 @@
+ 2023-06-15 15:56:46,258 INFO [decode.py:675] Decoding started
+ 2023-06-15 15:56:46,260 INFO [decode.py:681] Device: cuda:0
+ 2023-06-15 15:56:46,268 INFO [decode.py:691] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '38211604d6a24b15f320578a1a38f6c12d7a711c', 'k2-git-date': 'Mon Jun 12 10:59:44 2023', 'lhotse-version': '1.15.0.dev+git.f1fd23d.clean', 'torch-version': '2.0.0+cu117', 'torch-cuda-available': True, 'torch-cuda-version': '11.7', 'python-version': '3.8', 'icefall-git-branch': 'ted/zipformer', 'icefall-git-sha1': '323a299-dirty', 'icefall-git-date': 'Tue Jun 13 04:47:15 2023', 'icefall-path': '/exp/draj/jsalt2023/icefall', 'k2-path': '/exp/draj/jsalt2023/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/jsalt2023/lhotse/lhotse/__init__.py', 'hostname': 'r2n04', 'IP address': '10.1.2.4'}, 'epoch': 50, 'iter': 0, 'avg': 22, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp/v5'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'decoding_method': 'modified_beam_search', 'beam_size': 4, 'beam': 20.0, 'ngram_lm_scale': 0.01, 'max_contexts': 8, 'max_states': 64, 'context_size': 2, 'max_sym_per_frame': 1, 'num_paths': 200, 'nbest_scale': 0.5, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'res_dir': PosixPath('zipformer/exp/v5/modified_beam_search'), 'suffix': 'epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500}
+ 2023-06-15 15:56:46,268 INFO [decode.py:693] About to create model
+ 2023-06-15 15:56:46,927 INFO [decode.py:760] Calculating the averaged model over epoch range from 28 (excluded) to 50
+ 2023-06-15 15:56:51,104 INFO [decode.py:794] Number of model parameters: 65549011
+ 2023-06-15 15:56:51,104 INFO [asr_datamodule.py:361] About to get dev cuts
+ 2023-06-15 15:56:51,106 INFO [asr_datamodule.py:366] About to get test cuts
decoding_results/regular_transducer/modified_beam_search/log-decode-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model-2023-06-15-15-58-01 ADDED
@@ -0,0 +1,27 @@
+ 2023-06-15 15:58:01,483 INFO [decode.py:675] Decoding started
+ 2023-06-15 15:58:01,484 INFO [decode.py:681] Device: cuda:0
+ 2023-06-15 15:58:01,492 INFO [decode.py:691] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '38211604d6a24b15f320578a1a38f6c12d7a711c', 'k2-git-date': 'Mon Jun 12 10:59:44 2023', 'lhotse-version': '1.15.0.dev+git.f1fd23d.clean', 'torch-version': '2.0.0+cu117', 'torch-cuda-available': True, 'torch-cuda-version': '11.7', 'python-version': '3.8', 'icefall-git-branch': 'ted/zipformer', 'icefall-git-sha1': '323a299-dirty', 'icefall-git-date': 'Tue Jun 13 04:47:15 2023', 'icefall-path': '/exp/draj/jsalt2023/icefall', 'k2-path': '/exp/draj/jsalt2023/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/jsalt2023/lhotse/lhotse/__init__.py', 'hostname': 'r2n04', 'IP address': '10.1.2.4'}, 'epoch': 50, 'iter': 0, 'avg': 22, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp/v5'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'decoding_method': 'modified_beam_search', 'beam_size': 4, 'beam': 20.0, 'ngram_lm_scale': 0.01, 'max_contexts': 8, 'max_states': 64, 'context_size': 2, 'max_sym_per_frame': 1, 'num_paths': 200, 'nbest_scale': 0.5, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'res_dir': PosixPath('zipformer/exp/v5/modified_beam_search'), 'suffix': 'epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500}
+ 2023-06-15 15:58:01,492 INFO [decode.py:693] About to create model
+ 2023-06-15 15:58:02,075 INFO [decode.py:760] Calculating the averaged model over epoch range from 28 (excluded) to 50
+ 2023-06-15 15:58:05,955 INFO [decode.py:794] Number of model parameters: 65549011
+ 2023-06-15 15:58:05,956 INFO [asr_datamodule.py:361] About to get dev cuts
+ 2023-06-15 15:58:05,958 INFO [asr_datamodule.py:366] About to get test cuts
+ 2023-06-15 15:58:15,345 INFO [decode.py:572] batch 0/?, cuts processed until now is 30
+ 2023-06-15 15:59:30,951 INFO [decode.py:588] The transcripts are stored in zipformer/exp/v5/modified_beam_search/recogs-dev-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt
+ 2023-06-15 15:59:30,981 INFO [utils.py:562] [dev-beam_size_4] %WER 6.54% [1192 / 18226, 188 ins, 402 del, 602 sub ]
+ 2023-06-15 15:59:31,044 INFO [decode.py:601] Wrote detailed error stats to zipformer/exp/v5/modified_beam_search/errs-dev-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt
+ 2023-06-15 15:59:31,045 INFO [decode.py:617]
+ For dev, WER of different settings are:
+ beam_size_4 6.54 best for dev
+
+ 2023-06-15 15:59:36,799 INFO [decode.py:572] batch 0/?, cuts processed until now is 40
+ 2023-06-15 16:01:10,193 INFO [decode.py:572] batch 20/?, cuts processed until now is 1063
+ 2023-06-15 16:01:25,571 INFO [zipformer.py:1728] name=None, attn_weights_entropy = tensor([1.9471, 2.3907, 3.3421, 3.0589], device='cuda:0')
+ 2023-06-15 16:01:28,902 INFO [decode.py:588] The transcripts are stored in zipformer/exp/v5/modified_beam_search/recogs-test-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt
+ 2023-06-15 16:01:28,948 INFO [utils.py:562] [test-beam_size_4] %WER 6.00% [1707 / 28430, 204 ins, 699 del, 804 sub ]
+ 2023-06-15 16:01:29,043 INFO [decode.py:601] Wrote detailed error stats to zipformer/exp/v5/modified_beam_search/errs-test-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt
+ 2023-06-15 16:01:29,044 INFO [decode.py:617]
+ For test, WER of different settings are:
+ beam_size_4 6.0 best for test
+
+ 2023-06-15 16:01:29,044 INFO [decode.py:825] Done!
decoding_results/regular_transducer/modified_beam_search/recogs-dev-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/modified_beam_search/recogs-test-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/regular_transducer/modified_beam_search/wer-summary-dev-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ beam_size_4 6.54
decoding_results/regular_transducer/modified_beam_search/wer-summary-test-beam_size_4-epoch-50-avg-22-modified_beam_search-beam-size-4-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ beam_size_4 6.0
exp/regular_transducer/decode.sh ADDED
@@ -0,0 +1,12 @@
+ export CUDA_VISIBLE_DEVICES="0"
+ for m in greedy_search beam_search fast_beam_search modified_beam_search; do
+ ./zipformer/decode.py \
+ --epoch 50 \
+ --avg 22 \
+ --use-averaged-model True \
+ --exp-dir zipformer/exp \
+ --max-duration 500 \
+ --causal 0 \
+ --decoding-method $m \
+ --beam-size 4
+ done
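Note that `--epoch 50 --avg 22` corresponds to the "epoch range from 28 (excluded) to 50" reported in the decode logs, i.e. checkpoints 29 through 50. The sketch below shows plain checkpoint averaging over that range only as a simplified picture; icefall's `--use-averaged-model True` interpolates its running `model_avg` checkpoints rather than re-averaging epoch files, and the `epoch-N.pt` / `"model"` layout is assumed from icefall's checkpoint format:

```
# Simplified sketch: plain average of the last `avg` epoch checkpoints.
import torch

def average_checkpoints(exp_dir: str, epoch: int = 50, avg: int = 22) -> dict:
    # Average epochs (epoch - avg, epoch], i.e. 29..50 here (28 excluded).
    start = epoch - avg
    avg_state = None
    for e in range(start + 1, epoch + 1):
        # Assumes icefall-style checkpoints with weights under the "model" key.
        state = torch.load(f"{exp_dir}/epoch-{e}.pt", map_location="cpu")["model"]
        if avg_state is None:
            avg_state = {k: v.clone().float() for k, v in state.items()}
        else:
            for k in avg_state:
                avg_state[k] += state[k].float()
    return {k: v / avg for k, v in avg_state.items()}

# averaged = average_checkpoints("zipformer/exp", epoch=50, avg=22)
# model.load_state_dict(averaged)  # hypothetical usage
```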
exp/regular_transducer/export.sh ADDED
@@ -0,0 +1,6 @@
+ ./zipformer/export.py \
+ --exp-dir ./zipformer/exp \
+ --bpe-model data/lang_bpe_500/bpe.model \
+ --epoch 50 \
+ --avg 22 \
+ --jit 1
exp/regular_transducer/jit_script.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:254527e341deea8496867da1ea086efa2a33e2ec3c624bb5dc3727bb3f03e9c2
+ size 264940478
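The `.pt` entries in this commit are Git LFS pointer files (spec line, oid, size) rather than the weights themselves. A minimal sketch of fetching and loading the TorchScript export, assuming the file is downloaded from this model repository with `huggingface_hub` (the `repo_id` below is a placeholder, not something stated in this commit):

```
# Sketch only: resolve the LFS pointer to the real checkpoint and load it.
import torch
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="<namespace>/<this-model-repo>",   # placeholder, fill in the actual repo id
    filename="exp/regular_transducer/jit_script.pt",
)
model = torch.jit.load(path, map_location="cpu")  # produced by export.sh with --jit 1
model.eval()
```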
exp/regular_transducer/log/log-train-2023-06-15-01-56-32-0 ADDED
The diff for this file is too large to render. See raw diff
 
exp/regular_transducer/log/log-train-2023-06-15-01-56-32-1 ADDED
The diff for this file is too large to render. See raw diff
 
exp/regular_transducer/log/log-train-2023-06-15-01-56-32-2 ADDED
The diff for this file is too large to render. See raw diff
 
exp/regular_transducer/log/log-train-2023-06-15-01-56-32-3 ADDED
The diff for this file is too large to render. See raw diff
 
exp/regular_transducer/pretrained.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f533966f4811c5c3d343b307dca956f9b9d63accbf7035b9bcfc4102c584d2e5
+ size 262605742
exp/regular_transducer/tensorboard/events.out.tfevents.1686808592.r2n01.160747.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:512da5ba3cbf0b75738c65c1afc259a9b13907f3e8242b704971dca0b9341bed
+ size 272322
exp/regular_transducer/train.sh ADDED
@@ -0,0 +1,10 @@
+ export CUDA_VISIBLE_DEVICES="0,1,2,3"
+ ./zipformer/train.py \
+ --world-size 4 \
+ --num-epochs 50 \
+ --start-epoch 1 \
+ --use-fp16 1 \
+ --exp-dir zipformer/exp \
+ --causal 0 \
+ --max-duration 1000 \
+ --master-port 12345