jxie committed on
Commit fb84b3d
1 Parent(s): 2e267df

Upload SMAForSSL

Files changed (2)
  1. config.json +100 -0
  2. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,100 @@
+ {
+ "_name_or_path": "/iris/u/jwxie/workspace/domain-agnostic-pretraining/examples/research_projects/domain-agnostic-pretraining/saved_models/bio_pretrained/pfam_guided_self_random_select_masking_recon_small-adamw_torch-lr3e-4-wd0.01-mr0.15/checkpoint-1120000",
+ "architectures": [
+ "SMAForSSL"
+ ],
+ "attention_dropout_prob": 0.0,
+ "cross_attention_widening_factor": 1,
+ "cross_eval_noising_args": null,
+ "cross_train_noising_args": [
+ [
+ "RandomlySelectedCrossAttentionMasking",
+ {
+ "exclude_seen_reconstruction": true,
+ "masking_ratio": 0.15,
+ "num_per_query": 4,
+ "varying_length": true
+ }
+ ]
+ ],
+ "decoder_attention_channels": 512,
+ "decoder_heads": 8,
+ "decoder_latent_channels": 512,
+ "decoder_type": "cross_attention",
+ "dense_use_bias": true,
+ "drop_path_rate": 0.0,
+ "embedded_channels": 512,
+ "encoder_cross_attention_channels": 256,
+ "encoder_type": "cross_attention",
+ "final_project": true,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "initializer_range": 0.02,
+ "input_channels": 3,
+ "input_type": "discrete",
+ "latent_channels": 1024,
+ "layer_norm_eps": 1e-12,
+ "layernorm_eps": 1e-12,
+ "loss_fn": "mse",
+ "max_position_embeddings": 1024,
+ "model_type": "perceiver_sma",
+ "num_blocks": 1,
+ "num_cross_attention_heads": 8,
+ "num_discrete_tokens": 262,
+ "num_latents": 256,
+ "num_outputs": 1024,
+ "num_self_attends_per_block": 16,
+ "num_self_attention_heads": 8,
+ "output_channels": 262,
+ "pe_initializer_range": 0.02,
+ "post_decoder_layers": null,
+ "project_after_concat": true,
+ "qk_channels": 256,
+ "self_attention_widening_factor": 1,
+ "share_decoder_queries": true,
+ "share_embedding_weights": true,
+ "teacher_args": {
+ "auxiliary_loss_fn": "mse",
+ "auxiliary_loss_weight": 1.0,
+ "ema_args": {
+ "ema_decay_end": 0.0,
+ "ema_decay_start": 0.0
+ },
+ "eval_transform_args": [
+ [
+ "RandomlySelectedCrossAttentionMasking",
+ {
+ "exclude_seen_reconstruction": true,
+ "masking_ratio": 0.15,
+ "num_per_query": 4,
+ "varying_length": true
+ }
+ ]
+ ],
+ "mask_replace": 3,
+ "num_layer_target_avg": null,
+ "reconstruction_decoder_args": {
+ "num_heads": 1,
+ "num_outputs": 1024,
+ "output_channels": 262,
+ "qk_channels": 256,
+ "query_num_channels": 512,
+ "share_decoder_queries": true,
+ "share_embedding_weights": true,
+ "use_query_residual": true,
+ "v_channels": 512
+ },
+ "reconstruction_loss_fn": "crossentropy",
+ "reconstruction_loss_weight": 1.0,
+ "reconstruction_weighted_loss": false,
+ "target_normalization_fn": "layernorm",
+ "train_transform_args": null
+ },
+ "teacher_name": "ReconstructionTeacher",
+ "torch_dtype": "float32",
+ "transformers_version": "4.26.0.dev0",
+ "use_decoder": false,
+ "use_position_embeddings": true,
+ "use_query_residual": true,
+ "v_channels": 1024
+ }
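
Note: this config describes a Perceiver-style masked-modeling model ("perceiver_sma" / SMAForSSL). Those classes come from the uploader's domain-agnostic-pretraining codebase, not from a released transformers version ("transformers_version" is 4.26.0.dev0), so loading the checkpoint requires that custom code. A minimal loading sketch, assuming both files above sit in the working directory; the commented import path is hypothetical:

import torch
from transformers import PretrainedConfig

# The generic config loader works on any transformers-style config.json.
config = PretrainedConfig.from_json_file("config.json")
print(config.architectures)  # ["SMAForSSL"]

# Raw weights; keys should match the SMAForSSL module tree.
state_dict = torch.load("pytorch_model.bin", map_location="cpu")

# With the project's code on the Python path (hypothetical import path):
# from sma.modeling_sma import SMAConfig, SMAForSSL
# model = SMAForSSL(SMAConfig.from_json_file("config.json"))
# model.load_state_dict(state_dict)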
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c78b525243b69e5df56e69e9ee95571544f61880a489d576c21962b6a5fe340a
+ size 329512757
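
The pytorch_model.bin entry above is a Git LFS pointer, not the weights themselves; cloning with git-lfs installed (or running git lfs pull) replaces it with the ~330 MB binary. A small sketch, assuming the real file has been fetched to the working directory, that checks the download against the size and sha256 recorded in the pointer:

import hashlib
import os

EXPECTED_OID = "c78b525243b69e5df56e69e9ee95571544f61880a489d576c21962b6a5fe340a"
EXPECTED_SIZE = 329512757  # bytes, from the pointer file above

path = "pytorch_model.bin"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")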