imatag-vch committed
Commit d5695ca
1 Parent(s): 30ec35e

Upload ResNetForZeroBitWatermarkDetection

Files changed (3)
  1. config.json +45 -0
  2. modeling_resnet.py +57 -0
  3. pytorch_model.bin +2 -2
config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "architectures": [
+     "ResNetForZeroBitWatermarkDetection"
+   ],
+   "auto_map": {
+     "AutoModel": "modeling_resnet.ResNetForZeroBitWatermarkDetection"
+   },
+   "depths": [
+     2,
+     2,
+     2,
+     2
+   ],
+   "downsample_in_first_stage": false,
+   "embedding_size": 64,
+   "hidden_act": "relu",
+   "hidden_sizes": [
+     64,
+     128,
+     256,
+     512
+   ],
+   "id2label": {
+     "0": "watermarked by chance"
+   },
+   "label2id": null,
+   "layer_type": "basic",
+   "model_type": "resnet",
+   "num_channels": 3,
+   "out_features": [
+     "stage4"
+   ],
+   "out_indices": [
+     4
+   ],
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2"
+ }
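The auto_map entry above is what makes this custom head loadable through the Auto classes: transformers resolves "modeling_resnet.ResNetForZeroBitWatermarkDetection" from the repository when remote code is trusted. A minimal loading sketch follows; the repository id used here is a placeholder for illustration, not something stated in this commit.

import torch
from transformers import AutoModel

repo_id = "imatag/zero-bit-watermark-detector"  # hypothetical id, for illustration only

# trust_remote_code=True lets AutoModel follow the auto_map entry in config.json
# and instantiate ResNetForZeroBitWatermarkDetection from modeling_resnet.py.
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
model.eval()

# One 3-channel image tensor, preprocessed however the paired image processor expects.
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)
print(outputs.logits.shape)  # torch.Size([1, 1]): one calibrated detection logit per image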
modeling_resnet.py ADDED
@@ -0,0 +1,57 @@
+ import torch
+ from torch import nn
+
+ from transformers import ResNetPreTrainedModel
+ from transformers.modeling_outputs import ImageClassifierOutputWithNoAttention
+ from transformers.image_processing_utils import BaseImageProcessor
+ from transformers import ResNetConfig, ResNetModel
+ from typing import Optional
+
+ class ResNetForZeroBitWatermarkDetection(ResNetPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.resnet = ResNetModel(config)
+         self.classifier = nn.Sequential(
+             nn.Flatten(),
+             nn.Linear(config.hidden_sizes[-1], 128),
+             nn.ReLU(),
+             nn.Dropout(0.2),
+             nn.Linear(128, 1))
+         self.register_buffer('beta', torch.tensor([1.0]))
+
+         # initialize weights and apply final processing
+         self.post_init()
+
+
+     # TODO docstring
+     def forward(
+         self,
+         pixel_values: Optional[torch.FloatTensor] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> ImageClassifierOutputWithNoAttention:
+
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
+
+         pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+         x = self.classifier(pooled_output)
+
+         # generalized-Gaussian recalibration; centering and scaling are already included in the last linear layer
+         x = 0.5 + torch.sign(x) * 0.5 * torch.special.gammainc(1 / self.beta, torch.abs(x) ** self.beta)
+
+         # Laplacian calibration (special case of the above when beta == 1);
+         # centering and scaling are likewise included in the last linear layer
+         # x = 0.5 + torch.sign(x) * 0.5 * (1 - torch.exp(-torch.abs(x)))  # Laplacian
+
+         logits = torch.log(x) - torch.log1p(-x)
+
+         loss = None
+
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return (loss,) + output if loss is not None else output
+
+         return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
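The calibration step in forward can be read on its own: the raw classifier score is pushed through the CDF of a unit-scale generalized Gaussian (the gammainc term), and the resulting probability is returned as its logit, so applying a sigmoid to the model's logits recovers that probability. A standalone sketch of just that transform, assuming unit scale because centering and scaling are folded into the last linear layer as the in-code comment says:

import torch

def generalized_gaussian_cdf(s: torch.Tensor, beta: torch.Tensor) -> torch.Tensor:
    # F(s) = 1/2 + sign(s)/2 * P(1/beta, |s|**beta), where P is the regularized
    # lower incomplete gamma function (torch.special.gammainc).
    return 0.5 + torch.sign(s) * 0.5 * torch.special.gammainc(1.0 / beta, torch.abs(s) ** beta)

s = torch.tensor([-2.0, 0.0, 2.0])       # example raw classifier scores
beta = torch.tensor([1.0])               # beta == 1 reduces to the Laplacian variant in the comments
p = generalized_gaussian_cdf(s, beta)
logits = torch.log(p) - torch.log1p(-p)  # same logit transform as in forward
print(torch.allclose(torch.sigmoid(logits), p))  # True: the logits are just logit(p)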
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1c5f0bddc443f36fc1e5020340e9d7d1282408186c13ef4154cb7b17042c2eb5
- size 45059901
+ oid sha256:9e2334747e88820507d79d11ae3693a4c58292ba35b8d011e1a5585eba7fca9e
+ size 45051585