amaye15 committed on
Commit 0fff8d2
1 Parent(s): d48e82c

Upload AutoEncoder

Files changed (3)
  1. config.json +3 -2
  2. model.safetensors +2 -2
  3. modeling_autoencoder.py +44 -17
config.json CHANGED
@@ -6,11 +6,12 @@
     "AutoConfig": "modeling_autoencoder.AutoEncoderConfig",
     "AutoModel": "modeling_autoencoder.AutoEncoder"
   },
+  "bidirectional": true,
   "compression_rate": 0.5,
-  "dropout_rate": 0.1,
+  "dropout_rate": 0.5,
   "input_dim": 784,
   "latent_dim": 32,
-  "layer_types": "rnn",
+  "layer_types": "lstm",
   "model_type": "autoencoder",
   "num_layers": 4,
   "torch_dtype": "float32",
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0843a9219f8207ea933b09d69c8fd229ed196f2427634d4a250e313c2e7cb790
-size 7348552
+oid sha256:61c6f8af940de39412307f20af9dedc7df9e669e2a516638ee565e44d8cb5118
+size 47379808
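The re-uploaded checkpoint is intended to be loaded through the custom-code auto classes declared in config.json above. A hedged sketch of that path (the repo id here is an assumption based on the commented-out push_to_hub("autoencoder") call in modeling_autoencoder.py; substitute the real Hub path):

import torch
from transformers import AutoConfig, AutoModel

repo_id = "amaye15/autoencoder"  # hypothetical repo id, replace with the actual one

# trust_remote_code=True lets transformers resolve AutoEncoderConfig / AutoEncoder
# from modeling_autoencoder.py via the auto_map entries in config.json.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)

x = torch.randn(8, 1, config.input_dim)  # (batch, seq_len, input_dim=784); shape is an assumption
out = model(x)                            # forward returns the decoded tensor (assumed)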
modeling_autoencoder.py CHANGED
@@ -1,15 +1,19 @@
 
 from torch import nn
 
-#from autoencoder_model.configuration_autoencoder import AutoEncoderConfig
+from transformers import PretrainedConfig, PreTrainedModel, AutoConfig, AutoModel
 
+# from huggingface_hub import notebook_login
 
+# notebook_login()
 
-from transformers import PretrainedConfig, PreTrainedModel
-
-from transformers import PretrainedConfig
+# AutoEncoderConfig.register_for_auto_class()
+# AutoEncoder.register_for_auto_class("AutoModel")
 
+# AutoConfig.register("autoencoder", AutoEncoderConfig)
+# AutoModel.register(AutoEncoderConfig, AutoModel)
 
+# autoencoder.push_to_hub("autoencoder")
 class AutoEncoderConfig(PretrainedConfig):
     model_type = "autoencoder"
 
@@ -21,6 +25,7 @@ class AutoEncoderConfig(PretrainedConfig):
         dropout_rate=None,
         num_layers=None,
         compression_rate=None,
+        bidirectional=False,
         **kwargs
     ):
         super().__init__(**kwargs)
@@ -30,8 +35,9 @@ class AutoEncoderConfig(PretrainedConfig):
         self.dropout_rate = dropout_rate
         self.num_layers = num_layers
         self.compression_rate = compression_rate
+        self.bidirectional = bidirectional
 
-def create_layers(model_section, layer_types, input_dim, latent_dim, num_layers, dropout_rate, compression_rate):
+def create_layers(model_section, layer_types, input_dim, latent_dim, num_layers, dropout_rate, compression_rate, bidirectional):
 
     layers = []
     current_dim = input_dim
@@ -52,18 +58,18 @@ def create_layers(model_section, layer_types, input_dim, latent_dim, num_layers,
     input_diamensions.reverse()
     output_diamensions.reverse()
 
+    if bidirectional & (layer_types in ['lstm', 'rnn', 'gru']):
+        output_diamensions = [2*value for value in output_diamensions]
+
     for idx, (input_dim, output_dim) in enumerate(zip(input_diamensions, output_diamensions)):
         if layer_types == 'linear':
             layers.append(nn.Linear(input_dim, output_dim))
         elif layer_types == 'lstm':
-            # Assuming we are using LSTMs in a way that returns a sequence output
-            layers.append(nn.LSTM(input_dim, output_dim, batch_first=True))
+            layers.append(nn.LSTM(input_dim, output_dim // (2 if bidirectional else 1), batch_first=True, bidirectional=bidirectional))
         elif layer_types == 'rnn':
-            # Assuming we are using LSTMs in a way that returns a sequence output
-            layers.append(nn.RNN(input_dim, output_dim, batch_first=True))
+            layers.append(nn.RNN(input_dim, output_dim // (2 if bidirectional else 1), batch_first=True, bidirectional=bidirectional))
         elif layer_types == 'gru':
-            # Assuming we are using LSTMs in a way that returns a sequence output
-            layers.append(nn.GRU(input_dim, output_dim, batch_first=True))
+            layers.append(nn.GRU(input_dim, output_dim // (2 if bidirectional else 1), batch_first=True, bidirectional=bidirectional))
         if (idx != num_layers - 1) & (dropout_rate != None):
             layers.append(nn.Dropout(dropout_rate))
     return nn.Sequential(*layers)
@@ -76,19 +82,40 @@ class AutoEncoder(PreTrainedModel):
 
         self.encoder = create_layers("encoder",
             config.layer_types, config.input_dim, config.latent_dim,
-            config.num_layers, config.dropout_rate, config.compression_rate
+            config.num_layers, config.dropout_rate, config.compression_rate,
+            config.bidirectional,
         )
         # Assuming symmetry between encoder and decoder
         self.decoder = create_layers("decoder",
             config.layer_types, config.input_dim, config.latent_dim,
-            config.num_layers, config.dropout_rate, config.compression_rate
+            config.num_layers, config.dropout_rate, config.compression_rate,
+            config.bidirectional,
         )
 
     def forward(self, x):
-        # Handle LSTM differently since it outputs (output, (h_n, c_n))
-        if config.layer_types == ['lstm', 'rnn', 'gru']:
-            x, _ = self.encoder(x)
-            x, _ = self.decoder(x)
+        if self.config.layer_types in ['lstm', 'rnn', 'gru']:
+            for layer in self.encoder:
+                print(layer)
+                if isinstance(layer, nn.LSTM):
+                    x, (h_n, c_n) = layer(x)
+
+                elif isinstance(layer, nn.RNN):
+                    x, h_o = layer(x)
+                elif isinstance(layer, nn.GRU):
+                    x, h_o = layer(x)
+                else:
+                    x = layer(x)
+
+            for layer in self.decoder:
+                if isinstance(layer, nn.LSTM):
+                    x, (h_n, c_n) = layer(x)
+                elif isinstance(layer, nn.RNN):
+                    x, h_o = layer(x)
+                elif isinstance(layer, nn.GRU):
+                    x, h_o = layer(x)
+                else:
+                    x = layer(x)
+
         else:
             x = self.encoder(x)
             x = self.decoder(x)
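The halving of the hidden size in the new recurrent branches is what keeps the concatenated forward/backward outputs at the intended width, and it is also why forward now unpacks (output, (h_n, c_n)) per layer instead of calling the nn.Sequential directly. A standalone sketch of that bookkeeping in plain PyTorch (no dependency on the classes above; 784 and 32 are the input_dim/latent_dim from config.json):

import torch
from torch import nn

bidirectional = True
input_dim, output_dim = 784, 32

# Same construction as the new create_layers branch: halve the hidden size so the
# two directions concatenate back to output_dim features.
lstm = nn.LSTM(input_dim, output_dim // (2 if bidirectional else 1),
               batch_first=True, bidirectional=bidirectional)

x = torch.randn(8, 16, input_dim)   # (batch, seq_len, input_dim)
out, (h_n, c_n) = lstm(x)           # nn.LSTM returns (output, (h_n, c_n))
print(out.shape)                    # torch.Size([8, 16, 32]) == 2 directions * 16 hidden units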