Arnaudding001 committed on
Commit
48d26bf
1 Parent(s): 4151959

Create util.py

Files changed (1)
util.py +229 -0
util.py ADDED
@@ -0,0 +1,229 @@
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import dlib  # used by get_video_crop_parameter below
import random
import math
import argparse
import torch
from torch.utils import data
from torch.nn import functional as F
from torch import autograd
from torch.nn import init
import torchvision.transforms as transforms
from model.stylegan.op import conv2d_gradfix
from model.encoder.encoders.psp_encoders import GradualStyleEncoder
from model.encoder.align_all_parallel import get_landmark

# Display a [-1, 1]-normalized CHW image tensor with matplotlib.
def visualize(img_arr, dpi):
    plt.figure(figsize=(10, 10), dpi=dpi)
    plt.imshow(((img_arr.detach().cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8))
    plt.axis('off')
    plt.show()

# Save a [-1, 1]-normalized CHW RGB tensor to disk (OpenCV expects BGR).
def save_image(img, filename):
    tmp = ((img.detach().cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
    cv2.imwrite(filename, cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR))

def load_image(filename):
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    img = Image.open(filename)
    img = transform(img)
    return img.unsqueeze(dim=0)

def data_sampler(dataset, shuffle, distributed):
    if distributed:
        return data.distributed.DistributedSampler(dataset, shuffle=shuffle)

    if shuffle:
        return data.RandomSampler(dataset)

    else:
        return data.SequentialSampler(dataset)

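# Hypothetical usage sketch (not part of the original file): wiring
# data_sampler() into a DataLoader; `dataset`, `batch_size`, and
# `distributed` are assumed to come from the training script.
def _example_make_loader(dataset, batch_size, distributed=False):
    return data.DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=data_sampler(dataset, shuffle=True, distributed=distributed),
        drop_last=True,
    )
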
def requires_grad(model, flag=True):
    for p in model.parameters():
        p.requires_grad = flag

def accumulate(model1, model2, decay=0.999):
    par1 = dict(model1.named_parameters())
    par2 = dict(model2.named_parameters())

    for k in par1.keys():
        par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)

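# Hypothetical usage sketch (not part of the original file): maintaining an
# exponential-moving-average (EMA) copy of the generator during training;
# `g` and `g_ema` are assumed to be two instances of the same module class.
def _example_ema_update(g, g_ema, first_step=False):
    if first_step:
        accumulate(g_ema, g, 0)      # decay=0 copies g's weights into g_ema
    else:
        accumulate(g_ema, g, 0.999)  # afterwards, decay toward g's weights
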
def sample_data(loader):
    while True:
        for batch in loader:
            yield batch

def d_logistic_loss(real_pred, fake_pred):
    real_loss = F.softplus(-real_pred)
    fake_loss = F.softplus(fake_pred)

    return real_loss.mean() + fake_loss.mean()

# R1 gradient penalty on real images (StyleGAN2 discriminator regularizer).
def d_r1_loss(real_pred, real_img):
    with conv2d_gradfix.no_weight_gradients():
        grad_real, = autograd.grad(
            outputs=real_pred.sum(), inputs=real_img, create_graph=True
        )
    grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()

    return grad_penalty

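# Hypothetical usage sketch (not part of the original file): lazy R1
# regularization as in StyleGAN2, applied once every `d_reg_every` steps;
# `discriminator`, `real_img`, and `d_optim` are assumed.
def _example_r1_step(discriminator, real_img, d_optim, r1=10, d_reg_every=16):
    real_img.requires_grad = True
    real_pred = discriminator(real_img)
    r1_loss = d_r1_loss(real_pred, real_img)
    d_optim.zero_grad()
    # The 0 * real_pred[0] term keeps real_pred in the graph for the backward pass.
    (r1 / 2 * r1_loss * d_reg_every + 0 * real_pred[0]).backward()
    d_optim.step()
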
def g_nonsaturating_loss(fake_pred):
    loss = F.softplus(-fake_pred).mean()

    return loss

# Path length regularization for the generator (StyleGAN2).
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    noise = torch.randn_like(fake_img) / math.sqrt(
        fake_img.shape[2] * fake_img.shape[3]
    )
    grad, = autograd.grad(
        outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
    )
    path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))

    path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)

    path_penalty = (path_lengths - path_mean).pow(2).mean()

    return path_penalty, path_mean.detach(), path_lengths

def make_noise(batch, latent_dim, n_noise, device):
    if n_noise == 1:
        return torch.randn(batch, latent_dim, device=device)

    noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)

    return noises

def mixing_noise(batch, latent_dim, prob, device):
    if prob > 0 and random.random() < prob:
        return make_noise(batch, latent_dim, 2, device)

    else:
        return [make_noise(batch, latent_dim, 1, device)]

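# Hypothetical usage sketch (not part of the original file): a generator step
# with style mixing. The generator is assumed to take a list of latent codes
# and return (image, latent), as in the stylegan2-pytorch convention.
def _example_g_step(generator, discriminator, batch, latent_dim, device, mixing=0.9):
    noise = mixing_noise(batch, latent_dim, mixing, device)
    fake_img, _ = generator(noise)
    fake_pred = discriminator(fake_img)
    return g_nonsaturating_loss(fake_pred)
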
def set_grad_none(model, targets):
    for n, p in model.named_parameters():
        if n in targets:
            p.grad = None

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('BatchNorm2d') != -1:
        if hasattr(m, 'weight') and m.weight is not None:
            init.normal_(m.weight.data, 1.0, 0.02)
        if hasattr(m, 'bias') and m.bias is not None:
            init.constant_(m.bias.data, 0.0)
    elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
        if hasattr(m, 'bias') and m.bias is not None:
            init.constant_(m.bias.data, 0.0)

# Load a pSp encoder from a checkpoint; a forward hook adds the average
# latent so the encoder outputs absolute W+ codes.
def load_psp_standalone(checkpoint_path, device='cuda'):
    ckpt = torch.load(checkpoint_path, map_location='cpu')
    opts = ckpt['opts']
    if 'output_size' not in opts:
        opts['output_size'] = 1024
    opts['n_styles'] = int(math.log(opts['output_size'], 2)) * 2 - 2
    opts = argparse.Namespace(**opts)
    psp = GradualStyleEncoder(50, 'ir_se', opts)
    psp_dict = {k.replace('encoder.', ''): v for k, v in ckpt['state_dict'].items() if k.startswith('encoder.')}
    psp.load_state_dict(psp_dict)
    psp.eval()
    psp = psp.to(device)
    latent_avg = ckpt['latent_avg'].to(device)

    def add_latent_avg(model, inputs, outputs):
        return outputs + latent_avg.repeat(outputs.shape[0], 1, 1)

    psp.register_forward_hook(add_latent_avg)
    return psp

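# Hypothetical usage sketch (not part of the original file): encoding a face
# image into W+ latent codes. The checkpoint path is a placeholder, and the
# image is assumed to be already face-aligned at the encoder's input size
# (e.g. 256x256); thanks to the forward hook above, the returned codes can be
# fed to a StyleGAN generator directly.
def _example_encode_face(image_path, checkpoint_path, device='cuda'):
    psp = load_psp_standalone(checkpoint_path, device)
    img = load_image(image_path).to(device)
    with torch.no_grad():
        latent = psp(img)  # expected shape: [1, n_styles, 512]
    return latent
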
# Compute crop parameters for a face image or video frame: rescale so the eye
# distance is ~64 px, then crop around the eye mid-point with the given
# padding (left, right, top, bottom), snapping the box to multiples of 8.
def get_video_crop_parameter(filepath, predictor, padding=[200, 200, 200, 200]):
    if isinstance(filepath, str):
        img = dlib.load_rgb_image(filepath)
    else:
        img = filepath
    lm = get_landmark(img, predictor)
    if lm is None:
        return None
    lm_chin          = lm[0 : 17]   # left-right
    lm_eyebrow_left  = lm[17 : 22]  # left-right
    lm_eyebrow_right = lm[22 : 27]  # left-right
    lm_nose          = lm[27 : 31]  # top-down
    lm_nostrils      = lm[31 : 36]  # top-down
    lm_eye_left      = lm[36 : 42]  # left-clockwise
    lm_eye_right     = lm[42 : 48]  # left-clockwise
    lm_mouth_outer   = lm[48 : 60]  # left-clockwise
    lm_mouth_inner   = lm[60 : 68]  # left-clockwise

    scale = 64. / (np.mean(lm_eye_right[:, 0]) - np.mean(lm_eye_left[:, 0]))
    center = ((np.mean(lm_eye_right, axis=0) + np.mean(lm_eye_left, axis=0)) / 2) * scale
    h, w = round(img.shape[0] * scale), round(img.shape[1] * scale)
    left = max(round(center[0] - padding[0]), 0) // 8 * 8
    right = min(round(center[0] + padding[1]), w) // 8 * 8
    top = max(round(center[1] - padding[2]), 0) // 8 * 8
    bottom = min(round(center[1] + padding[3]), h) // 8 * 8
    return h, w, top, bottom, left, right, scale

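# Hypothetical usage sketch (not part of the original file): cropping one
# video frame around the detected face; `predictor` is assumed to be a
# dlib.shape_predictor loaded from the 68-landmark model file.
def _example_crop_frame(frame, predictor):
    paras = get_video_crop_parameter(frame, predictor)
    if paras is None:
        return None  # no face detected
    h, w, top, bottom, left, right, scale = paras
    frame = cv2.resize(frame, (w, h))  # cv2.resize takes (width, height)
    return frame[top:bottom, left:right]
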
# Convert a [-1, 1]-normalized CHW RGB tensor to an 8-bit HWC BGR array for OpenCV.
def tensor2cv2(img):
    tmp = ((img.cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
    return cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR)

# Get parameters from the StyleGAN generator and group them by layer index
# (resolution levels 0-17, plus "others" for everything else).
def gather_params(G):
    params = dict(
        [(res, {}) for res in range(18)] + [("others", {})]
    )
    for n, p in sorted(list(G.named_buffers()) + list(G.named_parameters())):
        if n.startswith("convs"):
            layer = int(n.split(".")[1]) + 1
            params[layer][n] = p
        elif n.startswith("to_rgbs"):
            layer = int(n.split(".")[1]) * 2 + 3
            params[layer][n] = p
        elif n.startswith("conv1"):
            params[0][n] = p
        elif n.startswith("to_rgb1"):
            params[1][n] = p
        else:
            params["others"][n] = p
    return params

# Blend the FFHQ StyleGAN model and the finetuned model for toonification;
# see "Resolution Dependent GAN Interpolation for Controllable Image
# Synthesis Between Domains". weight[res] = 1 takes layer res entirely from
# G_low, weight[res] = 0 keeps it from G_high.
def blend_models(G_low, G_high, weight=[1] * 7 + [0] * 11):
    params_low = gather_params(G_low)
    params_high = gather_params(G_high)

    for res in range(18):
        for n, p in params_high[res].items():
            params_high[res][n] = params_high[res][n] * (1 - weight[res]) + params_low[res][n] * weight[res]

    state_dict = {}
    for _, p in params_high.items():
        state_dict.update(p)

    return state_dict
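
# Hypothetical usage sketch (not part of the original file): blending two
# generators and loading the result back into a model instance. `g_ffhq` and
# `g_finetuned` are assumed Generator instances with matching architectures;
# strict=False tolerates any keys not covered by the blended state dict.
def _example_blend(g_ffhq, g_finetuned):
    state_dict = blend_models(g_ffhq, g_finetuned, weight=[1] * 7 + [0] * 11)
    g_finetuned.load_state_dict(state_dict, strict=False)
    return g_finetuned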