- ctc-decoding
- fast_beam_search
- greedy_search
- modified_beam_search
- streaming
- tensorboard
- jit_script_chunk_32_left_128.pt (264 MB): TorchScript export of the model (see the loading sketch after this list)
- (261 MB)
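
The `jit_script_chunk_32_left_128.pt` file listed above is a TorchScript (`torch.jit`) export, so it can be loaded without the original Python model definition. Below is a minimal sketch that assumes only the standard `torch.jit.load` API; the file name comes from the listing, while the `map_location` choice and the inspection step are illustrative, and no forward call is made because the model's input signature is not shown here.

```python
# Minimal sketch: load the TorchScript checkpoint listed above.
# Assumes only the standard torch.jit.load API; the model's expected
# input signature is not documented in the listing, so we only inspect it.
import torch

model = torch.jit.load("jit_script_chunk_32_left_128.pt", map_location="cpu")
model.eval()

# Print the scripted module to inspect its submodules
# (the checkpoint contains encoder, decoder, and joiner components).
print(model)
```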