David Piscasio committed
Commit: aaa41cb
Parent: 7369193

Added models folder

models/__init__.py ADDED
@@ -0,0 +1,67 @@
+"""This package contains modules related to objective functions, optimizations, and network architectures.
+
+To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
+You need to implement the following five functions:
+    -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+    -- <set_input>: unpack data from dataset and apply preprocessing.
+    -- <forward>: produce intermediate results.
+    -- <optimize_parameters>: calculate loss, gradients, and update network weights.
+    -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+
+In the function <__init__>, you need to define four lists:
+    -- self.loss_names (str list): specify the training losses that you want to plot and save.
+    -- self.model_names (str list): define networks used in our training.
+    -- self.visual_names (str list): specify the images that you want to display and save.
+    -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+
+Now you can use the model class by specifying flag '--model dummy'.
+See our template model class 'template_model.py' for more details.
+"""
+
+import importlib
+from models.base_model import BaseModel
+
+
+def find_model_using_name(model_name):
+    """Import the module "models/[model_name]_model.py".
+
+    In the file, the class called DatasetNameModel() will
+    be instantiated. It has to be a subclass of BaseModel,
+    and it is case-insensitive.
+    """
+    model_filename = "models." + model_name + "_model"
+    modellib = importlib.import_module(model_filename)
+    model = None
+    target_model_name = model_name.replace('_', '') + 'model'
+    for name, cls in modellib.__dict__.items():
+        if name.lower() == target_model_name.lower() \
+           and issubclass(cls, BaseModel):
+            model = cls
+
+    if model is None:
+        print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
+        exit(0)
+
+    return model
+
+
+def get_option_setter(model_name):
+    """Return the static method <modify_commandline_options> of the model class."""
+    model_class = find_model_using_name(model_name)
+    return model_class.modify_commandline_options
+
+
+def create_model(opt):
+    """Create a model given the option.
+
+    This function wraps the model class found by <find_model_using_name>.
+    This is the main interface between this package and 'train.py'/'test.py'.
+
+    Example:
+        >>> from models import create_model
+        >>> model = create_model(opt)
+    """
+    model = find_model_using_name(opt.model)
+    instance = model(opt)
+    print("model [%s] was created" % type(instance).__name__)
+    return instance
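
The lookup above is purely name-based: '--model cycle_gan' imports models/cycle_gan_model.py and picks the class whose lowercased name matches 'cycleganmodel'. A minimal caller, mirroring how the repository's train.py is expected to use this package (opt and dataset come from the options and data packages, which are not part of this commit):

    from models import create_model

    model = create_model(opt)        # e.g. opt.model == 'cycle_gan' -> CycleGANModel
    model.setup(opt)                 # create schedulers and/or load saved weights (BaseModel)
    for i, data in enumerate(dataset):
        model.set_input(data)        # unpack a batch from the dataloader
        model.optimize_parameters()  # one optimization step
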
models/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (3.29 kB)

models/__pycache__/base_model.cpython-38.pyc ADDED
Binary file (10 kB)

models/__pycache__/networks.cpython-38.pyc ADDED
Binary file (23.4 kB)

models/__pycache__/test_model.cpython-38.pyc ADDED
Binary file (3.15 kB)

models/base_model.py ADDED
@@ -0,0 +1,230 @@
+import os
+import torch
+from collections import OrderedDict
+from abc import ABC, abstractmethod
+from . import networks
+
+
+class BaseModel(ABC):
+    """This class is an abstract base class (ABC) for models.
+    To create a subclass, you need to implement the following five functions:
+        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+        -- <set_input>: unpack data from dataset and apply preprocessing.
+        -- <forward>: produce intermediate results.
+        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
+        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+    """
+
+    def __init__(self, opt):
+        """Initialize the BaseModel class.
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+
+        When creating your custom class, you need to implement your own initialization.
+        In this function, you should first call <BaseModel.__init__(self, opt)>.
+        Then, you need to define four lists:
+            -- self.loss_names (str list): specify the training losses that you want to plot and save.
+            -- self.model_names (str list): define networks used in our training.
+            -- self.visual_names (str list): specify the images that you want to display and save.
+            -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+        """
+        self.opt = opt
+        self.gpu_ids = opt.gpu_ids
+        self.isTrain = opt.isTrain
+        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')  # get device name: CPU or GPU
+        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
+        if opt.preprocess != 'scale_width':  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
+            torch.backends.cudnn.benchmark = True
+        self.loss_names = []
+        self.model_names = []
+        self.visual_names = []
+        self.optimizers = []
+        self.image_paths = []
+        self.metric = 0  # used for learning rate policy 'plateau'
+
+    @staticmethod
+    def modify_commandline_options(parser, is_train):
+        """Add new model-specific options, and rewrite default values for existing options.
+
+        Parameters:
+            parser          -- original option parser
+            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+        Returns:
+            the modified parser.
+        """
+        return parser
+
+    @abstractmethod
+    def set_input(self, input):
+        """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+        Parameters:
+            input (dict): includes the data itself and its metadata information.
+        """
+        pass
+
+    @abstractmethod
+    def forward(self):
+        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
+        pass
+
+    @abstractmethod
+    def optimize_parameters(self):
+        """Calculate losses, gradients, and update network weights; called in every training iteration."""
+        pass
+
+    def setup(self, opt):
+        """Load and print networks; create schedulers.
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+        """
+        if self.isTrain:
+            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
+        if not self.isTrain or opt.continue_train:
+            load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
+            self.load_networks(load_suffix)
+        self.print_networks(opt.verbose)
+
+    def eval(self):
+        """Put the models in eval mode during test time."""
+        for name in self.model_names:
+            if isinstance(name, str):
+                net = getattr(self, 'net' + name)
+                net.eval()
+
+    def test(self):
+        """Forward function used in test time.
+
+        This function wraps the <forward> function in no_grad() so we don't save intermediate steps for backprop.
+        It also calls <compute_visuals> to produce additional visualization results.
+        """
+        with torch.no_grad():
+            self.forward()
+            self.compute_visuals()
+
+    def compute_visuals(self):
+        """Calculate additional output images for visdom and HTML visualization."""
+        pass
+
+    def get_image_paths(self):
+        """Return image paths that are used to load the current data."""
+        return self.image_paths
+
+    def update_learning_rate(self):
+        """Update learning rates for all the networks; called at the end of every epoch."""
+        old_lr = self.optimizers[0].param_groups[0]['lr']
+        for scheduler in self.schedulers:
+            if self.opt.lr_policy == 'plateau':
+                scheduler.step(self.metric)
+            else:
+                scheduler.step()
+
+        lr = self.optimizers[0].param_groups[0]['lr']
+        print('learning rate %.7f -> %.7f' % (old_lr, lr))
+
+    def get_current_visuals(self):
+        """Return visualization images. train.py will display these images with visdom, and save the images to an HTML file."""
+        visual_ret = OrderedDict()
+        for name in self.visual_names:
+            if isinstance(name, str):
+                visual_ret[name] = getattr(self, name)
+        return visual_ret
+
+    def get_current_losses(self):
+        """Return training losses / errors. train.py will print out these errors on the console, and save them to a file."""
+        errors_ret = OrderedDict()
+        for name in self.loss_names:
+            if isinstance(name, str):
+                errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number
+        return errors_ret
+
+    def save_networks(self, epoch):
+        """Save all the networks to the disk.
+
+        Parameters:
+            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
+        """
+        for name in self.model_names:
+            if isinstance(name, str):
+                save_filename = '%s_net_%s.pth' % (epoch, name)
+                save_path = os.path.join(self.save_dir, save_filename)
+                net = getattr(self, 'net' + name)
+
+                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
+                    torch.save(net.module.cpu().state_dict(), save_path)
+                    net.cuda(self.gpu_ids[0])
+                else:
+                    torch.save(net.cpu().state_dict(), save_path)
+
+    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
+        """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)."""
+        key = keys[i]
+        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
+            if module.__class__.__name__.startswith('InstanceNorm') and \
+                    (key == 'running_mean' or key == 'running_var'):
+                if getattr(module, key) is None:
+                    state_dict.pop('.'.join(keys))
+            if module.__class__.__name__.startswith('InstanceNorm') and \
+                    (key == 'num_batches_tracked'):
+                state_dict.pop('.'.join(keys))
+        else:
+            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
+
+    def load_networks(self, epoch):
+        """Load all the networks from the disk.
+
+        Parameters:
+            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
+        """
+        for name in self.model_names:
+            if isinstance(name, str):
+                load_filename = '%s_net_%s.pth' % (epoch, name)
+                load_path = os.path.join(self.save_dir, load_filename)
+                net = getattr(self, 'net' + name)
+                if isinstance(net, torch.nn.DataParallel):
+                    net = net.module
+                print('loading the model from %s' % load_path)
+                # if you are using PyTorch newer than 0.4 (e.g., built from
+                # GitHub source), you can remove str() on self.device
+                state_dict = torch.load(load_path, map_location=str(self.device))
+                if hasattr(state_dict, '_metadata'):
+                    del state_dict._metadata
+
+                # patch InstanceNorm checkpoints prior to 0.4
+                for key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop
+                    self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
+                net.load_state_dict(state_dict)
+
+    def print_networks(self, verbose):
+        """Print the total number of parameters in the network and (if verbose) the network architecture.
+
+        Parameters:
+            verbose (bool) -- if verbose: print the network architecture
+        """
+        print('---------- Networks initialized -------------')
+        for name in self.model_names:
+            if isinstance(name, str):
+                net = getattr(self, 'net' + name)
+                num_params = 0
+                for param in net.parameters():
+                    num_params += param.numel()
+                if verbose:
+                    print(net)
+                print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
+        print('-----------------------------------------------')
+
+    def set_requires_grad(self, nets, requires_grad=False):
+        """Set requires_grad=False for all the networks to avoid unnecessary computations.
+        Parameters:
+            nets (network list)  -- a list of networks
+            requires_grad (bool) -- whether the networks require gradients or not
+        """
+        if not isinstance(nets, list):
+            nets = [nets]
+        for net in nets:
+            if net is not None:
+                for param in net.parameters():
+                    param.requires_grad = requires_grad
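
The getattr conventions above fix a naming contract: an entry 'G' in model_names means the subclass defines self.netG, and an entry 'G' in loss_names means it defines self.loss_G. A minimal subclass satisfying that contract might look like the sketch below (the DummyModel name, single generator, and L1 objective are illustrative, not part of this commit):

    class DummyModel(BaseModel):
        def __init__(self, opt):
            BaseModel.__init__(self, opt)           # must be called first
            self.loss_names = ['G']                 # get_current_losses reads self.loss_G
            self.model_names = ['G']                # save/load/print_networks read self.netG
            self.visual_names = ['real', 'fake']    # get_current_visuals reads self.real / self.fake
            self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
                                          opt.norm, not opt.no_dropout, opt.init_type,
                                          opt.init_gain, self.gpu_ids)
            if self.isTrain:
                self.criterion = torch.nn.L1Loss()
                self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr)
                self.optimizers = [self.optimizer]  # setup() builds one scheduler per optimizer

        def set_input(self, input):
            self.real = input['A'].to(self.device)
            self.target = input['B'].to(self.device)
            self.image_paths = input['A_paths']

        def forward(self):
            self.fake = self.netG(self.real)

        def optimize_parameters(self):
            self.forward()
            self.optimizer.zero_grad()
            self.loss_G = self.criterion(self.fake, self.target)
            self.loss_G.backward()
            self.optimizer.step()
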
models/colorization_model.py ADDED
@@ -0,0 +1,68 @@
+from .pix2pix_model import Pix2PixModel
+import torch
+from skimage import color  # used for lab2rgb
+import numpy as np
+
+
+class ColorizationModel(Pix2PixModel):
+    """This is a subclass of Pix2PixModel for image colorization (black & white image -> colorful images).
+
+    The model training requires the '--dataset_mode colorization' dataset.
+    It trains a pix2pix model, mapping from L channel to ab channels in Lab color space.
+    By default, the colorization dataset will automatically set '--input_nc 1' and '--output_nc 2'.
+    """
+    @staticmethod
+    def modify_commandline_options(parser, is_train=True):
+        """Add new dataset-specific options, and rewrite default values for existing options.
+
+        Parameters:
+            parser          -- original option parser
+            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+        Returns:
+            the modified parser.
+
+        By default, we use the 'colorization' dataset for this model.
+        See the original pix2pix paper (https://arxiv.org/pdf/1611.07004.pdf) and colorization results (Figure 9 in the paper).
+        """
+        Pix2PixModel.modify_commandline_options(parser, is_train)
+        parser.set_defaults(dataset_mode='colorization')
+        return parser
+
+    def __init__(self, opt):
+        """Initialize the class.
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+
+        For visualization, we set 'visual_names' as 'real_A' (input real image),
+        'real_B_rgb' (ground truth RGB image), and 'fake_B_rgb' (predicted RGB image).
+        We convert the Lab image 'real_B' (inherited from Pix2PixModel) to an RGB image 'real_B_rgb'.
+        We convert the Lab image 'fake_B' (inherited from Pix2PixModel) to an RGB image 'fake_B_rgb'.
+        """
+        # reuse the pix2pix model
+        Pix2PixModel.__init__(self, opt)
+        # specify the images to be visualized.
+        self.visual_names = ['real_A', 'real_B_rgb', 'fake_B_rgb']
+
+    def lab2rgb(self, L, AB):
+        """Convert a Lab tensor image to an RGB numpy output.
+        Parameters:
+            L  (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
+            AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)
+
+        Returns:
+            rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
+        """
+        AB2 = AB * 110.0
+        L2 = (L + 1.0) * 50.0
+        Lab = torch.cat([L2, AB2], dim=1)
+        Lab = Lab[0].data.cpu().float().numpy()
+        Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
+        rgb = color.lab2rgb(Lab) * 255
+        return rgb
+
+    def compute_visuals(self):
+        """Calculate additional output images for visdom and HTML visualization."""
+        self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
+        self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
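
The constants in lab2rgb come from the Lab gamut: skimage's color.lab2rgb expects L in [0, 100] and ab roughly within [-110, 110], while the networks produce values in [-1, 1], hence L2 = (L + 1) * 50 and AB2 = AB * 110. A standalone sketch of the same mapping on dummy tensors (assuming torch, numpy, and scikit-image are installed):

    import numpy as np
    import torch
    from skimage import color

    L = torch.zeros(1, 1, 4, 4)    # network-range 0 maps to Lab L = 50 (mid lightness)
    AB = torch.zeros(1, 2, 4, 4)   # network-range 0 maps to neutral chroma (a = b = 0)
    Lab = torch.cat([(L + 1.0) * 50.0, AB * 110.0], dim=1)
    Lab = np.transpose(Lab[0].numpy().astype(np.float64), (1, 2, 0))
    rgb = color.lab2rgb(Lab) * 255
    print(rgb[0, 0])               # a uniform gray, roughly (119, 119, 119)
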
models/cycle_gan_model.py ADDED
@@ -0,0 +1,194 @@
+import torch
+import itertools
+from util.image_pool import ImagePool
+from .base_model import BaseModel
+from . import networks
+
+
+class CycleGANModel(BaseModel):
+    """
+    This class implements the CycleGAN model, for learning image-to-image translation without paired data.
+
+    The model training requires the '--dataset_mode unaligned' dataset.
+    By default, it uses a '--netG resnet_9blocks' ResNet generator,
+    a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
+    and a least-square GANs objective ('--gan_mode lsgan').
+
+    CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
+    """
+    @staticmethod
+    def modify_commandline_options(parser, is_train=True):
+        """Add new dataset-specific options, and rewrite default values for existing options.
+
+        Parameters:
+            parser          -- original option parser
+            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+        Returns:
+            the modified parser.
+
+        For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
+        A (source domain), B (target domain).
+        Generators: G_A: A -> B; G_B: B -> A.
+        Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
+        Forward cycle loss:  lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
+        Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
+        Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
+        Dropout is not used in the original CycleGAN paper.
+        """
+        parser.set_defaults(no_dropout=True)  # default CycleGAN did not use dropout
+        if is_train:
+            parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
+            parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
+            parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
+
+        return parser
+
+    def __init__(self, opt):
+        """Initialize the CycleGAN class.
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+        """
+        BaseModel.__init__(self, opt)
+        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
+        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
+        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
+        visual_names_A = ['real_A', 'fake_B', 'rec_A']
+        visual_names_B = ['real_B', 'fake_A', 'rec_B']
+        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
+            visual_names_A.append('idt_B')
+            visual_names_B.append('idt_A')
+
+        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
+        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
+        if self.isTrain:
+            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
+        else:  # during test time, only load Gs
+            self.model_names = ['G_A', 'G_B']
+
+        # define networks (both Generators and discriminators)
+        # The naming is different from those used in the paper.
+        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
+        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
+                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
+        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
+                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
+
+        if self.isTrain:  # define discriminators
+            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
+                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
+            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
+                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
+
+        if self.isTrain:
+            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
+                assert(opt.input_nc == opt.output_nc)
+            self.fake_A_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
+            self.fake_B_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
+            # define loss functions
+            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # define GAN loss.
+            self.criterionCycle = torch.nn.L1Loss()
+            self.criterionIdt = torch.nn.L1Loss()
+            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
+            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
+            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
+            self.optimizers.append(self.optimizer_G)
+            self.optimizers.append(self.optimizer_D)
+
+    def set_input(self, input):
+        """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+        Parameters:
+            input (dict): include the data itself and its metadata information.
+
+        The option 'direction' can be used to swap domain A and domain B.
+        """
+        AtoB = self.opt.direction == 'AtoB'
+        self.real_A = input['A' if AtoB else 'B'].to(self.device)
+        self.real_B = input['B' if AtoB else 'A'].to(self.device)
+        self.image_paths = input['A_paths' if AtoB else 'B_paths']
+
+    def forward(self):
+        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
+        self.fake_B = self.netG_A(self.real_A)  # G_A(A)
+        self.rec_A = self.netG_B(self.fake_B)   # G_B(G_A(A))
+        self.fake_A = self.netG_B(self.real_B)  # G_B(B)
+        self.rec_B = self.netG_A(self.fake_A)   # G_A(G_B(B))
+
+    def backward_D_basic(self, netD, real, fake):
+        """Calculate GAN loss for the discriminator.
+
+        Parameters:
+            netD (network)      -- the discriminator D
+            real (tensor array) -- real images
+            fake (tensor array) -- images generated by a generator
+
+        Return the discriminator loss.
+        We also call loss_D.backward() to calculate the gradients.
+        """
+        # Real
+        pred_real = netD(real)
+        loss_D_real = self.criterionGAN(pred_real, True)
+        # Fake
+        pred_fake = netD(fake.detach())
+        loss_D_fake = self.criterionGAN(pred_fake, False)
+        # Combined loss and calculate gradients
+        loss_D = (loss_D_real + loss_D_fake) * 0.5
+        loss_D.backward()
+        return loss_D
+
+    def backward_D_A(self):
+        """Calculate GAN loss for discriminator D_A."""
+        fake_B = self.fake_B_pool.query(self.fake_B)
+        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
+
+    def backward_D_B(self):
+        """Calculate GAN loss for discriminator D_B."""
+        fake_A = self.fake_A_pool.query(self.fake_A)
+        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
+
+    def backward_G(self):
+        """Calculate the loss for generators G_A and G_B."""
+        lambda_idt = self.opt.lambda_identity
+        lambda_A = self.opt.lambda_A
+        lambda_B = self.opt.lambda_B
+        # Identity loss
+        if lambda_idt > 0:
+            # G_A should be identity if real_B is fed: ||G_A(B) - B||
+            self.idt_A = self.netG_A(self.real_B)
+            self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
+            # G_B should be identity if real_A is fed: ||G_B(A) - A||
+            self.idt_B = self.netG_B(self.real_A)
+            self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
+        else:
+            self.loss_idt_A = 0
+            self.loss_idt_B = 0
+
+        # GAN loss D_A(G_A(A))
+        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
+        # GAN loss D_B(G_B(B))
+        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
+        # Forward cycle loss || G_B(G_A(A)) - A||
+        self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
+        # Backward cycle loss || G_A(G_B(B)) - B||
+        self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
+        # combined loss and calculate gradients
+        self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
+        self.loss_G.backward()
+
+    def optimize_parameters(self):
+        """Calculate losses, gradients, and update network weights; called in every training iteration."""
+        # forward
+        self.forward()      # compute fake images and reconstruction images.
+        # G_A and G_B
+        self.set_requires_grad([self.netD_A, self.netD_B], False)  # Ds require no gradients when optimizing Gs
+        self.optimizer_G.zero_grad()  # set G_A and G_B's gradients to zero
+        self.backward_G()             # calculate gradients for G_A and G_B
+        self.optimizer_G.step()       # update G_A and G_B's weights
+        # D_A and D_B
+        self.set_requires_grad([self.netD_A, self.netD_B], True)
+        self.optimizer_D.zero_grad()  # set D_A and D_B's gradients to zero
+        self.backward_D_A()           # calculate gradients for D_A
+        self.backward_D_B()           # calculate gradients for D_B
+        self.optimizer_D.step()       # update D_A and D_B's weights
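
The update ordering in optimize_parameters (freeze the discriminators, step the generators, unfreeze, step the discriminators on detached fakes) can be exercised in isolation. Below is a toy sketch of the A-side of that loop with 1x1 convolutions standing in for the real networks; only the lsgan and forward-cycle terms are shown, so it illustrates the ordering, not the full objective:

    import itertools
    import torch

    G_A, G_B = torch.nn.Conv2d(3, 3, 1), torch.nn.Conv2d(3, 3, 1)
    D_A = torch.nn.Conv2d(3, 1, 1)
    opt_G = torch.optim.Adam(itertools.chain(G_A.parameters(), G_B.parameters()), lr=2e-4, betas=(0.5, 0.999))
    opt_D = torch.optim.Adam(D_A.parameters(), lr=2e-4, betas=(0.5, 0.999))
    real_A, real_B = torch.randn(1, 3, 8, 8), torch.randn(1, 3, 8, 8)

    fake_B = G_A(real_A)                             # forward()
    rec_A = G_B(fake_B)
    for p in D_A.parameters():
        p.requires_grad = False                      # set_requires_grad(..., False): no D grads during the G step
    opt_G.zero_grad()
    loss_G = (D_A(fake_B) - 1).pow(2).mean() + 10.0 * (rec_A - real_A).abs().mean()
    loss_G.backward()                                # backprops *through* the frozen D into G_A/G_B
    opt_G.step()
    for p in D_A.parameters():
        p.requires_grad = True
    opt_D.zero_grad()
    loss_D = 0.5 * ((D_A(real_B) - 1).pow(2).mean() + D_A(fake_B.detach()).pow(2).mean())
    loss_D.backward()                                # detach() keeps the generators out of the D step
    opt_D.step()
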
models/networks.py ADDED
@@ -0,0 +1,615 @@
+import torch
+import torch.nn as nn
+from torch.nn import init
+import functools
+from torch.optim import lr_scheduler
+
+
+###############################################################################
+# Helper Functions
+###############################################################################
+
+
+class Identity(nn.Module):
+    def forward(self, x):
+        return x
+
+
+def get_norm_layer(norm_type='instance'):
+    """Return a normalization layer.
+
+    Parameters:
+        norm_type (str) -- the name of the normalization layer: batch | instance | none
+
+    For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
+    For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
+    """
+    if norm_type == 'batch':
+        norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
+    elif norm_type == 'instance':
+        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
+    elif norm_type == 'none':
+        def norm_layer(x): return Identity()
+    else:
+        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
+    return norm_layer
+
+
+def get_scheduler(optimizer, opt):
+    """Return a learning rate scheduler.
+
+    Parameters:
+        optimizer          -- the optimizer of the network
+        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
+                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
+
+    For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
+    and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
+    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
+    See https://pytorch.org/docs/stable/optim.html for more details.
+    """
+    if opt.lr_policy == 'linear':
+        def lambda_rule(epoch):
+            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
+            return lr_l
+        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
+    elif opt.lr_policy == 'step':
+        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
+    elif opt.lr_policy == 'plateau':
+        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
+    elif opt.lr_policy == 'cosine':
+        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
+    else:
+        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
+    return scheduler
+
+
+def init_weights(net, init_type='normal', init_gain=0.02):
+    """Initialize network weights.
+
+    Parameters:
+        net (network)     -- network to be initialized
+        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
+        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
+
+    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
+    work better for some applications. Feel free to try yourself.
+    """
+    def init_func(m):  # define the initialization function
+        classname = m.__class__.__name__
+        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
+            if init_type == 'normal':
+                init.normal_(m.weight.data, 0.0, init_gain)
+            elif init_type == 'xavier':
+                init.xavier_normal_(m.weight.data, gain=init_gain)
+            elif init_type == 'kaiming':
+                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
+            elif init_type == 'orthogonal':
+                init.orthogonal_(m.weight.data, gain=init_gain)
+            else:
+                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
+            if hasattr(m, 'bias') and m.bias is not None:
+                init.constant_(m.bias.data, 0.0)
+        elif classname.find('BatchNorm2d') != -1:  # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
+            init.normal_(m.weight.data, 1.0, init_gain)
+            init.constant_(m.bias.data, 0.0)
+
+    print('initialize network with %s' % init_type)
+    net.apply(init_func)  # apply the initialization function <init_func>
+
+
+def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
+    """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights.
+    Parameters:
+        net (network)      -- the network to be initialized
+        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
+        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
+        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
+
+    Return an initialized network.
+    """
+    if len(gpu_ids) > 0:
+        assert(torch.cuda.is_available())
+        net.to(gpu_ids[0])
+        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs
+    init_weights(net, init_type, init_gain=init_gain)
+    return net
+
+
+def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
+    """Create a generator.
+
+    Parameters:
+        input_nc (int)     -- the number of channels in input images
+        output_nc (int)    -- the number of channels in output images
+        ngf (int)          -- the number of filters in the last conv layer
+        netG (str)         -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
+        norm (str)         -- the name of normalization layers used in the network: batch | instance | none
+        use_dropout (bool) -- if use dropout layers.
+        init_type (str)    -- the name of our initialization method.
+        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
+        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
+
+    Returns a generator.
+
+    Our current implementation provides two types of generators:
+        U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
+        The original U-Net paper: https://arxiv.org/abs/1505.04597
+
+        Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
+        A Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
+        We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
+
+
+    The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
+    """
+    net = None
+    norm_layer = get_norm_layer(norm_type=norm)
+
+    if netG == 'resnet_9blocks':
+        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
+    elif netG == 'resnet_6blocks':
+        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
+    elif netG == 'unet_128':
+        net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
+    elif netG == 'unet_256':
+        net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
+    else:
+        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
+    return init_net(net, init_type, init_gain, gpu_ids)
+
+
+def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
+    """Create a discriminator.
+
+    Parameters:
+        input_nc (int)     -- the number of channels in input images
+        ndf (int)          -- the number of filters in the first conv layer
+        netD (str)         -- the architecture's name: basic | n_layers | pixel
+        n_layers_D (int)   -- the number of conv layers in the discriminator; effective when netD=='n_layers'
+        norm (str)         -- the type of normalization layers used in the network.
+        init_type (str)    -- the name of the initialization method.
+        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
+        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
+
+    Returns a discriminator.
+
+    Our current implementation provides three types of discriminators:
+        [basic]: 'PatchGAN' classifier described in the original pix2pix paper.
+        It can classify whether 70×70 overlapping patches are real or fake.
+        Such a patch-level discriminator architecture has fewer parameters
+        than a full-image discriminator and can work on arbitrarily-sized images
+        in a fully convolutional fashion.
+
+        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
+        with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
+
+        [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
+        It encourages greater color diversity but has no effect on spatial statistics.
+
+    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
+    """
+    net = None
+    norm_layer = get_norm_layer(norm_type=norm)
+
+    if netD == 'basic':  # default PatchGAN classifier
+        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
+    elif netD == 'n_layers':  # more options
+        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
+    elif netD == 'pixel':  # classify if each pixel is real or fake
+        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
+    else:
+        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
+    return init_net(net, init_type, init_gain, gpu_ids)
+
+
+##############################################################################
+# Classes
+##############################################################################
+class GANLoss(nn.Module):
+    """Define different GAN objectives.
+
+    The GANLoss class abstracts away the need to create the target label tensor
+    that has the same size as the input.
+    """
+
+    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
+        """Initialize the GANLoss class.
+
+        Parameters:
+            gan_mode (str)            -- the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
+            target_real_label (float) -- label for a real image
+            target_fake_label (float) -- label of a fake image
+
+        Note: Do not use sigmoid as the last layer of the Discriminator.
+        LSGAN needs no sigmoid. Vanilla GANs will handle it with BCEWithLogitsLoss.
+        """
+        super(GANLoss, self).__init__()
+        self.register_buffer('real_label', torch.tensor(target_real_label))
+        self.register_buffer('fake_label', torch.tensor(target_fake_label))
+        self.gan_mode = gan_mode
+        if gan_mode == 'lsgan':
+            self.loss = nn.MSELoss()
+        elif gan_mode == 'vanilla':
+            self.loss = nn.BCEWithLogitsLoss()
+        elif gan_mode in ['wgangp']:
+            self.loss = None
+        else:
+            raise NotImplementedError('gan mode %s not implemented' % gan_mode)
+
+    def get_target_tensor(self, prediction, target_is_real):
+        """Create label tensors with the same size as the input.
+
+        Parameters:
+            prediction (tensor)   -- typically the prediction from a discriminator
+            target_is_real (bool) -- if the ground truth label is for real images or fake images
+
+        Returns:
+            A label tensor filled with the ground truth label, and with the size of the input
+        """
+
+        if target_is_real:
+            target_tensor = self.real_label
+        else:
+            target_tensor = self.fake_label
+        return target_tensor.expand_as(prediction)
+
+    def __call__(self, prediction, target_is_real):
+        """Calculate loss given the Discriminator's output and ground truth labels.
+
+        Parameters:
+            prediction (tensor)   -- typically the prediction output from a discriminator
+            target_is_real (bool) -- if the ground truth label is for real images or fake images
+
+        Returns:
+            the calculated loss.
+        """
+        if self.gan_mode in ['lsgan', 'vanilla']:
+            target_tensor = self.get_target_tensor(prediction, target_is_real)
+            loss = self.loss(prediction, target_tensor)
+        elif self.gan_mode == 'wgangp':
+            if target_is_real:
+                loss = -prediction.mean()
+            else:
+                loss = prediction.mean()
+        return loss
+
+
+def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
+    """Calculate the gradient penalty loss, used in the WGAN-GP paper https://arxiv.org/abs/1704.00028
+
+    Arguments:
+        netD (network)           -- discriminator network
+        real_data (tensor array) -- real images
+        fake_data (tensor array) -- generated images from the generator
+        device (str)             -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
+        type (str)               -- if we mix real and fake data or not [real | fake | mixed].
+        constant (float)         -- the constant used in the formula ( ||gradient||_2 - constant)^2
+        lambda_gp (float)        -- weight for this loss
+
+    Returns the gradient penalty loss.
+    """
+    if lambda_gp > 0.0:
+        if type == 'real':  # either use real images, fake images, or a linear interpolation of the two.
+            interpolatesv = real_data
+        elif type == 'fake':
+            interpolatesv = fake_data
+        elif type == 'mixed':
+            alpha = torch.rand(real_data.shape[0], 1, device=device)
+            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
+            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
+        else:
+            raise NotImplementedError('{} not implemented'.format(type))
+        interpolatesv.requires_grad_(True)
+        disc_interpolates = netD(interpolatesv)
+        gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
+                                        grad_outputs=torch.ones(disc_interpolates.size()).to(device),
+                                        create_graph=True, retain_graph=True, only_inputs=True)
+        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
+        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp  # added eps
+        return gradient_penalty, gradients
+    else:
+        return 0.0, None
+
+
+class ResnetGenerator(nn.Module):
+    """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
+
+    We adapt Torch code and ideas from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
+    """
+
+    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
+        """Construct a Resnet-based generator.
+
+        Parameters:
+            input_nc (int)      -- the number of channels in input images
+            output_nc (int)     -- the number of channels in output images
+            ngf (int)           -- the number of filters in the last conv layer
+            norm_layer          -- normalization layer
+            use_dropout (bool)  -- if use dropout layers
+            n_blocks (int)      -- the number of ResNet blocks
+            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
+        """
+        assert(n_blocks >= 0)
+        super(ResnetGenerator, self).__init__()
+        if type(norm_layer) == functools.partial:
+            use_bias = norm_layer.func == nn.InstanceNorm2d
+        else:
+            use_bias = norm_layer == nn.InstanceNorm2d
+
+        model = [nn.ReflectionPad2d(3),
+                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
+                 norm_layer(ngf),
+                 nn.ReLU(True)]
+
+        n_downsampling = 2
+        for i in range(n_downsampling):  # add downsampling layers
+            mult = 2 ** i
+            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
+                      norm_layer(ngf * mult * 2),
+                      nn.ReLU(True)]
+
+        mult = 2 ** n_downsampling
+        for i in range(n_blocks):  # add ResNet blocks
+            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
+
+        for i in range(n_downsampling):  # add upsampling layers
+            mult = 2 ** (n_downsampling - i)
+            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
+                                         kernel_size=3, stride=2,
+                                         padding=1, output_padding=1,
+                                         bias=use_bias),
+                      norm_layer(int(ngf * mult / 2)),
+                      nn.ReLU(True)]
+        model += [nn.ReflectionPad2d(3)]
+        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
+        model += [nn.Tanh()]
+
+        self.model = nn.Sequential(*model)
+
+    def forward(self, input):
+        """Standard forward."""
+        return self.model(input)
+
+
+class ResnetBlock(nn.Module):
+    """Define a Resnet block."""
+
+    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
+        """Initialize the Resnet block.
+
+        A resnet block is a conv block with skip connections.
+        We construct a conv block with the build_conv_block function,
+        and implement skip connections in the <forward> function.
+        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
+        """
+        super(ResnetBlock, self).__init__()
+        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
+
+    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
+        """Construct a convolutional block.
+
+        Parameters:
+            dim (int)           -- the number of channels in the conv layer.
+            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
+            norm_layer          -- normalization layer
+            use_dropout (bool)  -- if use dropout layers.
+            use_bias (bool)     -- if the conv layer uses bias or not
+
+        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
+        """
+        conv_block = []
+        p = 0
+        if padding_type == 'reflect':
+            conv_block += [nn.ReflectionPad2d(1)]
+        elif padding_type == 'replicate':
+            conv_block += [nn.ReplicationPad2d(1)]
+        elif padding_type == 'zero':
+            p = 1
+        else:
+            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+
+        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
+        if use_dropout:
+            conv_block += [nn.Dropout(0.5)]
+
+        p = 0
+        if padding_type == 'reflect':
+            conv_block += [nn.ReflectionPad2d(1)]
+        elif padding_type == 'replicate':
+            conv_block += [nn.ReplicationPad2d(1)]
+        elif padding_type == 'zero':
+            p = 1
+        else:
+            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
+
+        return nn.Sequential(*conv_block)
+
+    def forward(self, x):
+        """Forward function (with skip connections)."""
+        out = x + self.conv_block(x)  # add skip connections
+        return out
+
+
+class UnetGenerator(nn.Module):
+    """Create a Unet-based generator."""
+
+    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
+        """Construct a Unet generator.
+        Parameters:
+            input_nc (int)  -- the number of channels in input images
+            output_nc (int) -- the number of channels in output images
+            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
+                               an image of size 128x128 will become of size 1x1 at the bottleneck
+            ngf (int)       -- the number of filters in the last conv layer
+            norm_layer      -- normalization layer
+
+        We construct the U-Net from the innermost layer to the outermost layer.
+        It is a recursive process.
+        """
+        super(UnetGenerator, self).__init__()
+        # construct unet structure
+        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)  # add the innermost layer
+        for i in range(num_downs - 5):  # add intermediate layers with ngf * 8 filters
+            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
+        # gradually reduce the number of filters from ngf * 8 to ngf
+        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
+        unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
+        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
+        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)  # add the outermost layer
+
+    def forward(self, input):
+        """Standard forward."""
+        return self.model(input)
+
+
+class UnetSkipConnectionBlock(nn.Module):
+    """Defines the Unet submodule with skip connection.
+        X -------------------identity----------------------
+        |-- downsampling -- |submodule| -- upsampling --|
+    """
+
+    def __init__(self, outer_nc, inner_nc, input_nc=None,
+                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
+        """Construct a Unet submodule with skip connections.
+
+        Parameters:
+            outer_nc (int) -- the number of filters in the outer conv layer
+            inner_nc (int) -- the number of filters in the inner conv layer
+            input_nc (int) -- the number of channels in input images/features
+            submodule (UnetSkipConnectionBlock) -- previously defined submodules
+            outermost (bool)    -- if this module is the outermost module
+            innermost (bool)    -- if this module is the innermost module
+            norm_layer          -- normalization layer
+            use_dropout (bool)  -- if use dropout layers.
+        """
+        super(UnetSkipConnectionBlock, self).__init__()
+        self.outermost = outermost
+        if type(norm_layer) == functools.partial:
+            use_bias = norm_layer.func == nn.InstanceNorm2d
+        else:
+            use_bias = norm_layer == nn.InstanceNorm2d
+        if input_nc is None:
+            input_nc = outer_nc
+        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
+                             stride=2, padding=1, bias=use_bias)
+        downrelu = nn.LeakyReLU(0.2, True)
+        downnorm = norm_layer(inner_nc)
+        uprelu = nn.ReLU(True)
+        upnorm = norm_layer(outer_nc)
+
+        if outermost:
+            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+                                        kernel_size=4, stride=2,
+                                        padding=1)
+            down = [downconv]
+            up = [uprelu, upconv, nn.Tanh()]
+            model = down + [submodule] + up
+        elif innermost:
+            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
+                                        kernel_size=4, stride=2,
+                                        padding=1, bias=use_bias)
+            down = [downrelu, downconv]
+            up = [uprelu, upconv, upnorm]
+            model = down + up
+        else:
+            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+                                        kernel_size=4, stride=2,
+                                        padding=1, bias=use_bias)
+            down = [downrelu, downconv, downnorm]
+            up = [uprelu, upconv, upnorm]
+
+            if use_dropout:
+                model = down + [submodule] + up + [nn.Dropout(0.5)]
+            else:
+                model = down + [submodule] + up
+
+        self.model = nn.Sequential(*model)
+
+    def forward(self, x):
+        if self.outermost:
+            return self.model(x)
+        else:   # add skip connections
+            return torch.cat([x, self.model(x)], 1)
+
+
+class NLayerDiscriminator(nn.Module):
+    """Defines a PatchGAN discriminator."""
+
+    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
+        """Construct a PatchGAN discriminator.
+
+        Parameters:
+            input_nc (int)  -- the number of channels in input images
+            ndf (int)       -- the number of filters in the last conv layer
+            n_layers (int)  -- the number of conv layers in the discriminator
+            norm_layer      -- normalization layer
+        """
+        super(NLayerDiscriminator, self).__init__()
+        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
+            use_bias = norm_layer.func == nn.InstanceNorm2d
+        else:
+            use_bias = norm_layer == nn.InstanceNorm2d
+
+        kw = 4
+        padw = 1
+        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
+        nf_mult = 1
+        nf_mult_prev = 1
+        for n in range(1, n_layers):  # gradually increase the number of filters
+            nf_mult_prev = nf_mult
+            nf_mult = min(2 ** n, 8)
+            sequence += [
+                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
+                norm_layer(ndf * nf_mult),
+                nn.LeakyReLU(0.2, True)
+            ]
+
+        nf_mult_prev = nf_mult
+        nf_mult = min(2 ** n_layers, 8)
+        sequence += [
+            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
+            norm_layer(ndf * nf_mult),
+            nn.LeakyReLU(0.2, True)
+        ]
+
+        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output 1 channel prediction map
+        self.model = nn.Sequential(*sequence)
+
+    def forward(self, input):
+        """Standard forward."""
+        return self.model(input)
+
+
+class PixelDiscriminator(nn.Module):
+    """Defines a 1x1 PatchGAN discriminator (pixelGAN)."""
+
+    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
+        """Construct a 1x1 PatchGAN discriminator.
+
+        Parameters:
+            input_nc (int)  -- the number of channels in input images
+            ndf (int)       -- the number of filters in the last conv layer
+            norm_layer      -- normalization layer
+        """
+        super(PixelDiscriminator, self).__init__()
+        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
+            use_bias = norm_layer.func == nn.InstanceNorm2d
+        else:
+            use_bias = norm_layer == nn.InstanceNorm2d
+
+        self.net = [
+            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
+            nn.LeakyReLU(0.2, True),
+            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
+            norm_layer(ndf * 2),
+            nn.LeakyReLU(0.2, True),
+            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
+
+        self.net = nn.Sequential(*self.net)
+
+    def forward(self, input):
+        """Standard forward."""
+        return self.net(input)
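
A quick CPU smoke test of the factory functions above (run with define_G, define_D, and GANLoss imported from models.networks; shapes and values are illustrative):

    import torch

    netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks', norm='instance')
    netD = define_D(input_nc=3, ndf=64, netD='basic', norm='instance')
    x = torch.randn(1, 3, 256, 256)
    y = netG(x)                   # Tanh output in [-1, 1], same spatial size: (1, 3, 256, 256)
    pred = netD(y)                # PatchGAN map of per-patch scores: (1, 1, 30, 30)
    criterion = GANLoss('lsgan')
    loss = criterion(pred, True)  # expands the real-label scalar to pred's shape, then MSE
    print(y.shape, pred.shape, float(loss))
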
models/pix2pix_model.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import torch
+ from .base_model import BaseModel
+ from . import networks
+
+
+ class Pix2PixModel(BaseModel):
+     """ This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
+
+     The model training requires '--dataset_mode aligned' dataset.
+     By default, it uses a '--netG unet_256' U-Net generator,
+     a '--netD basic' discriminator (PatchGAN),
+     and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
+
+     pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
+     """
+     @staticmethod
+     def modify_commandline_options(parser, is_train=True):
+         """Add new model-specific options, and rewrite default values for existing options.
+
+         Parameters:
+             parser          -- original option parser
+             is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+         Returns:
+             the modified parser.
+
+         For pix2pix, we do not use an image buffer.
+         The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
+         By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
+         """
+         # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
+         parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
+         if is_train:
+             parser.set_defaults(pool_size=0, gan_mode='vanilla')
+             parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
+
+         return parser
+
+     def __init__(self, opt):
+         """Initialize the pix2pix class.
+
+         Parameters:
+             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+         """
+         BaseModel.__init__(self, opt)
+         # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
+         self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
+         # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
+         self.visual_names = ['real_A', 'fake_B', 'real_B']
+         # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
+         if self.isTrain:
+             self.model_names = ['G', 'D']
+         else:  # during test time, only load G
+             self.model_names = ['G']
+         # define networks (both generator and discriminator)
+         self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
+                                       not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
+
+         if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images; therefore, #channels for D is input_nc + output_nc
+             self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
+                                           opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
+
+         if self.isTrain:
+             # define loss functions
+             self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
+             self.criterionL1 = torch.nn.L1Loss()
+             # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
+             self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
+             self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
+             self.optimizers.append(self.optimizer_G)
+             self.optimizers.append(self.optimizer_D)
+
+     def set_input(self, input):
+         """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+         Parameters:
+             input (dict) -- include the data itself and its metadata information.
+
+         The option 'direction' can be used to swap images in domain A and domain B.
+         """
+         AtoB = self.opt.direction == 'AtoB'
+         self.real_A = input['A' if AtoB else 'B'].to(self.device)
+         self.real_B = input['B' if AtoB else 'A'].to(self.device)
+         self.image_paths = input['A_paths' if AtoB else 'B_paths']
+
+     def forward(self):
+         """Run forward pass; called by both functions <optimize_parameters> and <test>."""
+         self.fake_B = self.netG(self.real_A)  # G(A)
+
+     def backward_D(self):
+         """Calculate GAN loss for the discriminator"""
+         # Fake; stop backprop to the generator by detaching fake_B
+         fake_AB = torch.cat((self.real_A, self.fake_B), 1)  # we use conditional GANs; we need to feed both input and output to the discriminator
+         pred_fake = self.netD(fake_AB.detach())
+         self.loss_D_fake = self.criterionGAN(pred_fake, False)
+         # Real
+         real_AB = torch.cat((self.real_A, self.real_B), 1)
+         pred_real = self.netD(real_AB)
+         self.loss_D_real = self.criterionGAN(pred_real, True)
+         # combine loss and calculate gradients
+         self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
+         self.loss_D.backward()
+
+     def backward_G(self):
+         """Calculate GAN and L1 loss for the generator"""
+         # First, G(A) should fake the discriminator
+         fake_AB = torch.cat((self.real_A, self.fake_B), 1)
+         pred_fake = self.netD(fake_AB)
+         self.loss_G_GAN = self.criterionGAN(pred_fake, True)
+         # Second, G(A) = B
+         self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
+         # combine loss and calculate gradients
+         self.loss_G = self.loss_G_GAN + self.loss_G_L1
+         self.loss_G.backward()
+
+     def optimize_parameters(self):
+         self.forward()                             # compute fake images: G(A)
+         # update D
+         self.set_requires_grad(self.netD, True)    # enable backprop for D
+         self.optimizer_D.zero_grad()               # set D's gradients to zero
+         self.backward_D()                          # calculate gradients for D
+         self.optimizer_D.step()                    # update D's weights
+         # update G
+         self.set_requires_grad(self.netD, False)   # D requires no gradients when optimizing G
+         self.optimizer_G.zero_grad()               # set G's gradients to zero
+         self.backward_G()                          # calculate gradients for G
+         self.optimizer_G.step()                    # update G's weights
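
Note: optimize_parameters above implements the standard alternating conditional-GAN update (a D step with the generator frozen via detach, then a G step with D's gradients disabled). A schematic training driver, with `opt`, `dataset`, and `n_epochs` as hypothetical placeholders rather than parts of this commit, might look like:

    from models import create_model

    model = create_model(opt)        # builds Pix2PixModel when opt.model == 'pix2pix'
    model.setup(opt)                 # creates schedulers, loads/prints networks
    for epoch in range(n_epochs):
        for data in dataset:         # an aligned dataset yields {'A', 'B', 'A_paths', 'B_paths'}
            model.set_input(data)            # unpack the dict and move tensors to the device
            model.optimize_parameters()      # forward, then D update, then G update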
models/template_model.py ADDED
@@ -0,0 +1,99 @@
+ """Model class template
+
+ This module provides a template for users to implement custom models.
+ You can specify '--model template' to use this model.
+ The class name should be consistent with both the filename and its model option.
+ The filename should be <model>_model.py
+ The class name should be <Model>Model
+ It implements a simple image-to-image translation baseline based on regression loss.
+ Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
+     min_<netG> ||netG(data_A) - data_B||_1
+ You need to implement the following functions:
+     <modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
+     <__init__>: Initialize this model class.
+     <set_input>: Unpack input data and perform data pre-processing.
+     <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
+     <optimize_parameters>: Update network weights; it will be called in every training iteration.
+ """
+ import torch
+ from .base_model import BaseModel
+ from . import networks
+
+
+ class TemplateModel(BaseModel):
+     @staticmethod
+     def modify_commandline_options(parser, is_train=True):
+         """Add new model-specific options and rewrite default values for existing options.
+
+         Parameters:
+             parser   -- the option parser
+             is_train -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+         Returns:
+             the modified parser.
+         """
+         parser.set_defaults(dataset_mode='aligned')  # You can rewrite default values for this model. For example, this model usually uses an aligned dataset as its dataset.
+         if is_train:
+             parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss')  # You can define new arguments for this model.
+
+         return parser
+
+     def __init__(self, opt):
+         """Initialize this model class.
+
+         Parameters:
+             opt -- training/test options
+
+         A few things can be done here.
+         - (required) call the initialization function of BaseModel
+         - define loss function, visualization images, model names, and optimizers
+         """
+         BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
+         # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
+         self.loss_names = ['G']  # get_current_losses looks up 'loss_' + name, so 'G' maps to self.loss_G below
+         # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
+         self.visual_names = ['data_A', 'data_B', 'output']
+         # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
+         # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
+         self.model_names = ['G']
+         # define networks; you can use opt.isTrain to specify different behaviors for training and test.
+         self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
+         if self.isTrain:  # only defined during training time
+             # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
+             # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
+             self.criterionLoss = torch.nn.L1Loss()
+             # define and initialize optimizers. You can define one optimizer for each network.
+             # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+             self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
+             self.optimizers = [self.optimizer]
+
+         # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
+
+     def set_input(self, input):
+         """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+         Parameters:
+             input: a dictionary that contains the data itself and its metadata information.
+         """
+         AtoB = self.opt.direction == 'AtoB'  # use <direction> to swap data_A and data_B
+         self.data_A = input['A' if AtoB else 'B'].to(self.device)  # get image data A
+         self.data_B = input['B' if AtoB else 'A'].to(self.device)  # get image data B
+         self.image_paths = input['A_paths' if AtoB else 'B_paths']  # get image paths
+
+     def forward(self):
+         """Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
+         self.output = self.netG(self.data_A)  # generate output image given the input data_A
+
+     def backward(self):
+         """Calculate losses, gradients, and update network weights; called in every training iteration"""
+         # calculate the intermediate results if necessary; here self.output has been computed during function <forward>
+         # calculate loss given the input and intermediate results
+         self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
+         self.loss_G.backward()  # calculate gradients of network G w.r.t. loss_G
+
+     def optimize_parameters(self):
+         """Update network weights; it will be called in every training iteration."""
+         self.forward()               # first call forward to calculate intermediate results
+         self.optimizer.zero_grad()   # clear network G's existing gradients
+         self.backward()              # calculate gradients for network G
+         self.optimizer.step()        # update network G's weights
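
Note: as a sanity check of the template's regression objective min_<netG> ||netG(data_A) - data_B||_1, here is a standalone version of the loss step; all tensors and the lambda value are stand-ins, not part of this commit.

    import torch

    output = torch.randn(1, 3, 256, 256, requires_grad=True)  # stands in for netG(data_A)
    data_B = torch.randn(1, 3, 256, 256)                      # stands in for the paired target image
    lambda_regression = 1.0                                   # default of --lambda_regression
    loss_G = torch.nn.L1Loss()(output, data_B) * lambda_regression
    loss_G.backward()                                         # populates output.grad, as backward() does for netG's parameters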
models/test_model.py ADDED
@@ -0,0 +1,70 @@
+ from .base_model import BaseModel
+ from . import networks
+
+
+ class TestModel(BaseModel):
+     """ This TestModel can be used to generate CycleGAN results for only one direction.
+     This model will automatically set '--dataset_mode single', which only loads the images from one collection.
+
+     See the test instruction for more details.
+     """
+     @staticmethod
+     def modify_commandline_options(parser, is_train=True):
+         """Add new model-specific options, and rewrite default values for existing options.
+
+         Parameters:
+             parser          -- original option parser
+             is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+         Returns:
+             the modified parser.
+
+         The model can only be used during test time. It requires '--dataset_mode single'.
+         You need to specify the network using the option '--model_suffix'.
+         """
+         assert not is_train, 'TestModel cannot be used during training time'
+         parser.set_defaults(dataset_mode='single')
+         parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.')
+
+         return parser
+
+     def __init__(self, opt):
+         """Initialize the TestModel class.
+
+         Parameters:
+             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+         """
+         assert(not opt.isTrain)
+         BaseModel.__init__(self, opt)
+         # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
+         self.loss_names = []
+         # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
+         self.visual_names = ['real', 'fake']
+         # self.visual_names = ['fake']
+         # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
+         self.model_names = ['G' + opt.model_suffix]  # only the generator is needed.
+         self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
+                                       opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
+
+         # assigns the model to self.netG_[suffix] so that it can be loaded
+         # please see <BaseModel.load_networks>
+         setattr(self, 'netG' + opt.model_suffix, self.netG)  # store netG in self.
+
+     def set_input(self, input):
+         """Unpack input data and perform necessary pre-processing steps.
+
+         Parameters:
+             input: the input image tensor. Note that this variant takes the tensor directly rather than the dataloader's dictionary, so image paths are not tracked.
+
+         We need to use 'single_dataset' dataset mode. It only loads images from one domain.
+         """
+         self.real = input.to(self.device)
+         # self.image_paths = input['A_paths']
+
+     def forward(self):
+         """Run forward pass."""
+         self.fake = self.netG(self.real)  # G(real)
+
+     def optimize_parameters(self):
+         """No optimization for test model."""
+         pass
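
Note: putting TestModel together with the package's create_model factory, a hypothetical one-direction inference run could look like the sketch below. `opt` and `image_tensor` are placeholders, and setup/eval/test/get_current_visuals are assumed to be the usual BaseModel helpers from this codebase.

    from models import create_model

    # `opt` stands in for an options object with opt.model == 'test'; not defined in this commit.
    model = create_model(opt)     # builds TestModel
    model.setup(opt)              # loads [epoch]_net_G[model_suffix].pth and prints the network
    model.eval()                  # switch norm/dropout layers to eval mode
    model.set_input(image_tensor)             # this variant takes a tensor directly
    model.test()                              # forward pass without gradients
    fake = model.get_current_visuals()['fake']  # the translated image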