import gc

import torch


def value_map(inputs, min_in, max_in, min_out, max_out):
    """Linearly remap `inputs` from the range [min_in, max_in] to [min_out, max_out].

    Example: value_map(x, 0.0, 1.0, -1.0, 1.0) rescales a [0, 1] tensor to [-1, 1].
    """
    return (inputs - min_in) * (max_out - min_out) / (max_in - min_in) + min_out


def flush(garbage_collect=True):
    """Free as much GPU memory as possible.

    Python's garbage collector runs first (when enabled) so that tensors it
    releases can then be returned to the driver by `torch.cuda.empty_cache()`.
    """
    if garbage_collect:
        gc.collect()
    torch.cuda.empty_cache()


def get_mean_std(tensor):
    """Return the per-channel spatial mean and standard deviation of a feature tensor."""
    if len(tensor.shape) == 3:
        # Treat a single (channels, height, width) image as a batch of one.
        tensor = tensor.unsqueeze(0)
    elif len(tensor.shape) != 4:
        raise ValueError("Expected tensor of shape (batch_size, channels, height, width)")
    mean = torch.mean(tensor, dim=[2, 3], keepdim=True)
    variance = torch.var(tensor, dim=[2, 3], keepdim=True)
    # The small epsilon keeps the square root numerically stable for near-constant channels.
    std = torch.sqrt(variance + 1e-5)
    return mean, std


def adain(content_features, style_features):
    """Adaptive Instance Normalization (AdaIN): normalize the content features,
    then rescale and shift them with the style features' statistics."""
    # Statistics are taken over the spatial dims for 4-D (B, C, H, W) feature maps,
    # or over dim 1 for 3-D inputs.
    dims = [2, 3]
    if len(content_features.shape) == 3:
        dims = [1]

    content_mean = torch.mean(content_features, dim=dims, keepdim=True)
    content_var = torch.var(content_features, dim=dims, keepdim=True)

    style_mean = torch.mean(style_features, dim=dims, keepdim=True)
    style_var = torch.var(style_features, dim=dims, keepdim=True)

    # Normalize the content features to zero mean and unit variance ...
    content_std = torch.sqrt(content_var + 1e-5)
    normalized_content = (content_features - content_mean) / content_std

    # ... then apply the style's mean and standard deviation.
    style_std = torch.sqrt(style_var + 1e-5)
    stylized_content = normalized_content * style_std + style_mean

    return stylized_content
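

# Illustrative usage sketch: the random tensors below merely stand in for real
# encoder activations; any matching pair of (B, C, H, W) feature maps works the
# same way. Shapes and values here are placeholder assumptions.
if __name__ == "__main__":
    content_features = torch.randn(1, 512, 32, 32)
    style_features = torch.randn(1, 512, 32, 32)

    stylized = adain(content_features, style_features)

    # After AdaIN, the stylized features should carry the style's channel-wise statistics.
    style_mean, style_std = get_mean_std(style_features)
    stylized_mean, stylized_std = get_mean_std(stylized)
    print("means match:", torch.allclose(style_mean, stylized_mean, atol=1e-4))
    print("stds match:", torch.allclose(style_std, stylized_std, atol=1e-2))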