patrickvonplaten committed
Commit
12e7e1e
1 Parent(s): 8ad7fe9
Files changed (2)
  1. config.json +49 -14
  2. pytorch_model.bin +2 -2
config.json CHANGED
@@ -1,13 +1,16 @@
 {
   "activation_dropout": 0.1,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
   "add_adapter": false,
   "architectures": [
-    "Data2VecForAudioModel"
+    "Data2VecAudioModel"
   ],
   "attention_dropout": 0.1,
-  "attention_probs_dropout_prob": 0.1,
-  "bos_token_id": 0,
-  "classifier_dropout": null,
+  "bos_token_id": 1,
+  "classifier_proj_size": 256,
+  "codevector_dim": 256,
+  "contrastive_logits_temperature": 0.1,
   "conv_bias": false,
   "conv_dim": [
     512,
@@ -37,33 +40,65 @@
     2,
     2
   ],
-  "do_stable_layer_norm": false,
+  "ctc_loss_reduction": "sum",
+  "ctc_zero_infinity": false,
+  "diversity_loss_weight": 0.1,
   "eos_token_id": 2,
   "feat_extract_activation": "gelu",
-  "feat_extract_norm": "layer",
+  "feat_extract_norm": "group",
   "feat_proj_dropout": 0.0,
+  "feat_quantizer_dropout": 0.0,
   "final_dropout": 0.1,
   "hidden_act": "gelu",
   "hidden_dropout": 0.1,
-  "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
   "initializer_range": 0.02,
   "intermediate_size": 3072,
   "layer_norm_eps": 1e-05,
+  "layerdrop": 0.1,
+  "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
   "mask_time_prob": 0.05,
-  "max_position_embeddings": 512,
-  "model_type": "data2vec",
+  "model_type": "data2vec-audio",
+  "num_adapter_layers": 3,
   "num_attention_heads": 12,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
   "num_conv_pos_embedding_groups": 16,
   "num_conv_pos_embeddings": 5,
   "num_feat_extract_layers": 7,
   "num_hidden_layers": 12,
-  "pad_token_id": 1,
-  "position_embedding_type": "absolute",
+  "num_negatives": 100,
+  "output_hidden_size": 768,
+  "pad_token_id": 0,
   "proj_codevector_dim": 256,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
   "torch_dtype": "float32",
   "transformers_version": "4.17.0.dev0",
-  "type_vocab_size": 2,
-  "use_cache": true,
-  "vocab_size": 32
+  "use_weighted_layer_sum": false,
+  "vocab_size": 32,
+  "xvector_output_dim": 512
 }
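For context, the updated file is a standard Data2VecAudioConfig as introduced in transformers 4.17 (the version recorded in the config). A minimal sketch of loading and inspecting it is shown below; the local path "." is an assumption standing in for this checkpoint's actual repository id, which is not named in the commit.

# Minimal sketch: load the updated config and check the renamed fields.
# Assumes transformers >= 4.17 and that config.json sits in the current
# directory; "." is a placeholder path, not the real repository id.
from transformers import Data2VecAudioConfig, Data2VecAudioModel

config = Data2VecAudioConfig.from_pretrained(".")  # reads ./config.json

print(config.model_type)         # "data2vec-audio" (was "data2vec")
print(config.architectures)      # ["Data2VecAudioModel"] (was ["Data2VecForAudioModel"])
print(config.feat_extract_norm)  # "group" (was "layer")

# Instantiate the architecture named in the config; weights are random
# unless pytorch_model.bin is present alongside config.json.
model = Data2VecAudioModel(config)
print(sum(p.numel() for p in model.parameters()))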
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6c61a644c4607cae1961a4b3d36fcc66b9e716068458af0c742de598f522b361
-size 372742325
+oid sha256:999df36ad12943c3848d8e2323266cf875d906ec5119d3f398bbff45f45a11d2
+size 372742261
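The file above is a Git LFS pointer rather than the weights themselves, so a downloaded copy can be checked against the new oid and size. A minimal sketch, assuming pytorch_model.bin has already been fetched into the working directory:

# Minimal sketch: verify a downloaded pytorch_model.bin against the new
# LFS pointer (oid sha256:999df3..., size 372742261). The local filename
# is an assumption.
import hashlib
import os

path = "pytorch_model.bin"
expected_oid = "999df36ad12943c3848d8e2323266cf875d906ec5119d3f398bbff45f45a11d2"
expected_size = 372742261

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")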