Column                      Type            Values / Range
task_type                   stringclasses   4 values
code_task                   stringclasses   15 values
start_line                  int64           4 – 1.79k
end_line                    int64           4 – 1.8k
before                      stringlengths   79 – 76.1k
between                     stringlengths   17 – 806
after                       stringlengths   2 – 72.6k
reason_categories_output    stringlengths   2 – 2.24k
horizon_categories_output   stringlengths   83 – 3.99k
reason_freq_analysis        stringclasses   150 values
horizon_freq_analysis       stringlengths   23 – 185
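For orientation, a minimal pandas sketch for pulling one row apart, assuming the split is materialized as a local Parquet file (data.parquet is a placeholder name, not a confirmed artifact of this dataset):

```python
import pandas as pd

# Placeholder file name; point this at the actual shard for the split.
df = pd.read_parquet("data.parquet")

row = df.iloc[0]
print(row["task_type"], row["code_task"])   # e.g. infilling_python GAN_model
print(row["start_line"], row["end_line"])   # line span of the masked region
print(row["between"])                       # code between the two contexts, i.e. the infill target
print(row["horizon_categories_output"])     # per-variable dependency annotations
```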
task_type: infilling_python
code_task: GAN_model
start_line: 175
end_line: 177
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(']
[" name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,']
[' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, 
kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = 
(image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' 
to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 
'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
reason_categories_output: []
horizon_categories_output: Variable 'self' used at line 176 is defined at line 160 and has a Medium-Range dependency. Variable 'filters' used at line 176 is defined at line 153 and has a Medium-Range dependency. Variable 'self' used at line 177 is defined at line 160 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 177 is defined at line 157 and has a Medium-Range dependency.
reason_freq_analysis: {}
horizon_freq_analysis: {'Variable Medium-Range': 4}
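The row above masks part of EqualizedConv2DTranspose. Per the surrounding context in the payload, every equalized layer in this file follows one rule: draw weights from a unit normal, rescale them at call time by sqrt(gain / fan_in), and leave the bias unscaled. A self-contained NumPy sketch of that rule (illustrative only, not the dataset's TensorFlow classes):

```python
import numpy as np

rng = np.random.default_rng(42)

def equalized_dense(x, units, gain=2.0):
    n_channels = x.shape[-1]
    scale = np.sqrt(gain / n_channels)            # sqrt(gain / fan_in)
    w = rng.standard_normal((n_channels, units))  # N(0, 1) weights
    b = np.zeros(units)
    return x @ (scale * w) + b                    # scale the weights, not the bias

x = rng.standard_normal((1, 512))
print(equalized_dense(x, units=16).shape)         # (1, 16)
```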
task_type: infilling_python
code_task: GAN_model
start_line: 182
end_line: 189
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):']
[' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,']
[' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and 
shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = 
tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def 
final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = 
(image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
reason_categories_output: []
horizon_categories_output: Variable 'config' used at line 183 is defined at line 182 and has a Short-Range dependency. Variable 'self' used at line 184 is defined at line 181 and has a Short-Range dependency. Variable 'filters' used at line 184 is defined at line 153 and has a Long-Range dependency. Variable 'self' used at line 185 is defined at line 181 and has a Short-Range dependency. Variable 'kernel_size' used at line 185 is defined at line 156 and has a Medium-Range dependency. Variable 'self' used at line 186 is defined at line 181 and has a Short-Range dependency. Variable 'strides' used at line 186 is defined at line 155 and has a Long-Range dependency. Variable 'self' used at line 187 is defined at line 181 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 187 is defined at line 156 and has a Long-Range dependency. Variable 'self' used at line 188 is defined at line 181 and has a Short-Range dependency. Variable 'bias_initializer' used at line 188 is defined at line 157 and has a Long-Range dependency. Variable 'self' used at line 189 is defined at line 181 and has a Short-Range dependency. Variable 'gain' used at line 189 is defined at line 158 and has a Long-Range dependency.
reason_freq_analysis: {}
horizon_freq_analysis: {'Variable Short-Range': 7, 'Variable Long-Range': 5, 'Variable Medium-Range': 1}
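The horizon_categories_output strings label each variable use by the distance between its use line and its definition line. A sketch of that classification, with thresholds inferred from the annotations in the rows above (distances up to roughly 10 lines are labelled Short-Range, up to roughly 30 Medium-Range, larger Long-Range); the rubric actually used to produce the dataset may differ:

```python
def horizon_category(use_line: int, def_line: int) -> str:
    # Thresholds inferred from this section's examples; not authoritative.
    distance = use_line - def_line
    if distance <= 10:
        return "Short-Range"
    if distance <= 30:
        return "Medium-Range"
    return "Long-Range"

# Checks against annotations quoted above:
print(horizon_category(183, 182))  # Short-Range  (distance 1)
print(horizon_category(176, 160))  # Medium-Range (distance 16)
print(horizon_category(186, 155))  # Long-Range   (distance 31)
```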
task_type: infilling_python
code_task: GAN_model
start_line: 195
end_line: 195
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,']
[' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),']
[' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 
'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 
512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = 
EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', 
' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'inputs' used at line 195 is defined at line 193 and has a Short-Range dependency. Variable 'self' used at line 195 is defined at line 193 and has a Short-Range dependency. Variable 'strides' used at line 195 is defined at line 155 and has a Long-Range dependency.
{}
{'Variable Short-Range': 2, 'Variable Long-Range': 1}
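Each record in this dump follows the same layout: task metadata, the line span of the fill region, three lists of source lines (the context before the region, the ground-truth fill, the context after it), then the dependency annotations and their frequency tallies. Below is a minimal sketch of how one record reassembles into the original GAN_model source and how it could be framed as a fill-in-the-middle task; the field names and the <FILL> sentinel are hypothetical, assuming each list element is one source line.

```python
# Minimal sketch (hypothetical field names, not an API the dataset defines).
def reassemble(record: dict) -> str:
    # The three line lists concatenate back into the full source file;
    # the middle list is the ground truth for lines start_line..end_line.
    return "\n".join(record["before"] + record["between"] + record["after"])

def fim_prompt(record: dict) -> tuple[str, str]:
    # Frame the record as fill-in-the-middle: the model sees the
    # surrounding context and must produce the `between` lines.
    context = ("\n".join(record["before"])
               + "\n<FILL>\n"          # hypothetical sentinel, not from the data
               + "\n".join(record["after"]))
    return context, "\n".join(record["between"])
```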
infilling_python
GAN_model
194
196
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):']
[' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,']
[" padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' 
act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), 
kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' 
kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom 
== None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 194 is imported at line 16 and has a Long-Range dependency. Variable 'inputs' used at line 194 is defined at line 193 and has a Short-Range dependency. Variable 'self' used at line 194 is defined at line 193 and has a Short-Range dependency. Variable 'scale' used at line 194 is defined at line 163 and has a Long-Range dependency. Variable 'w' used at line 194 is defined at line 165 and has a Medium-Range dependency. Variable 'inputs' used at line 195 is defined at line 193 and has a Short-Range dependency. Variable 'self' used at line 195 is defined at line 193 and has a Short-Range dependency. Variable 'strides' used at line 195 is defined at line 155 and has a Long-Range dependency. Variable 'self' used at line 196 is defined at line 193 and has a Short-Range dependency. Variable 'strides' used at line 196 is defined at line 155 and has a Long-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 5, 'Variable Long-Range': 3, 'Variable Medium-Range': 1}
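The horizon annotation above pairs every use site with its definition site, and the frequency dict that follows is consistent with bucketing those pairs by line distance. Here is a small sketch that reproduces this record's tally, assuming cutoffs of 10 lines for Short-Range and 30 for Medium-Range (inferred from the distances in these records, not stated anywhere in the data):

```python
from collections import Counter

def bucket(def_line: int, use_line: int) -> str:
    # Assumed cutoffs: <=10 lines Short, <=30 Medium, else Long.
    dist = abs(use_line - def_line)
    if dist <= 10:
        return "Short-Range"
    if dist <= 30:
        return "Medium-Range"
    return "Long-Range"

# (kind, definition line, use line) triples copied from this record.
deps = [
    ("Library", 16, 194), ("Variable", 193, 194), ("Variable", 193, 194),
    ("Variable", 163, 194), ("Variable", 165, 194), ("Variable", 193, 195),
    ("Variable", 193, 195), ("Variable", 155, 195), ("Variable", 193, 196),
    ("Variable", 155, 196),
]
freq = Counter(f"{kind} {bucket(d, u)}" for kind, d, u in deps)
print(dict(freq))
# {'Library Long-Range': 1, 'Variable Short-Range': 5,
#  'Variable Long-Range': 3, 'Variable Medium-Range': 1}
```

The same assumed cutoffs also reproduce the neighbouring records' tallies, e.g. {'Variable Short-Range': 2, 'Variable Medium-Range': 1} for the bias-add record that follows (distances 6, 25 and 5).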
infilling_python
GAN_model
199
199
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '']
[' x = x + self.b']
['', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' 
upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), 
kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' 
kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom 
== None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 199 is defined at line 193 and has a Short-Range dependency. Variable 'b' used at line 199 is defined at line 174 and has a Medium-Range dependency. Variable 'x' used at line 199 is defined at line 194 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2, 'Variable Medium-Range': 1}
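This record's fill target is the bias add inside EqualizedConv2DTranspose.call, and its surrounding context builds the tf.nn.conv2d_transpose call. Note that the call in the record passes a two-element output_shape, while tf.nn.conv2d_transpose expects the full NHWC output shape; the sketch below shows the presumably intended standalone operation (an assumption about intent, not the dataset's verbatim code) with all four dimensions supplied:

```python
import tensorflow as tf

x = tf.random.normal((1, 4, 4, 512))    # NHWC input, as in the records' tests
w = tf.random.normal((4, 4, 512, 512))  # (kh, kw, out_channels, in_channels)
strides = (4, 4)
out_shape = (x.shape[0],                 # batch
             x.shape[1] * strides[0],    # height upsampled by the stride
             x.shape[2] * strides[1],    # width upsampled by the stride
             w.shape[2])                 # output channels
y = tf.nn.conv2d_transpose(x, w, output_shape=out_shape,
                           strides=strides, padding='SAME')
print(y.shape)  # (1, 16, 16, 512)
```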
infilling_python
GAN_model
211
216
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ']
[' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain']
[' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' 
kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), 
filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = 
final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 
'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
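One detail worth flagging in the serialized context above (it repeats verbatim in every record, so it is noted only once): the EqualizedConv2DTranspose.call passes a two-element output_shape to tf.nn.conv2d_transpose, but TensorFlow expects the full 4-D NHWC shape, so that layer would fail when called. The source is kept verbatim here because the annotations below reference its exact line numbers; a minimal corrected sketch of just that method, assuming NHWC data and the class attributes (self.scale, self.w, self.b, self.strides, self.filters) defined in the serialized class, would look like:

    import tensorflow as tf  # the class above already assumes this import

    def call(self, inputs, training=None):
        # tf.nn.conv2d_transpose needs the full NHWC output shape,
        # not just (H, W); the batch size is taken dynamically.
        batch = tf.shape(inputs)[0]
        output_shape = tf.stack([batch,
                                 inputs.shape[1] * self.strides[0],
                                 inputs.shape[2] * self.strides[1],
                                 self.filters])
        x = tf.nn.conv2d_transpose(inputs,
                                   filters=self.scale * self.w,
                                   output_shape=output_shape,
                                   strides=self.strides,
                                   padding='SAME')
        return x + self.b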
[]
Class 'EqualizedDense' used at line 211 is defined at line 203 and has a Short-Range dependency. Variable 'self' used at line 211 is defined at line 204 and has a Short-Range dependency. Variable 'self' used at line 213 is defined at line 204 and has a Short-Range dependency. Variable 'units' used at line 213 is defined at line 205 and has a Short-Range dependency. Variable 'self' used at line 214 is defined at line 204 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 214 is defined at line 206 and has a Short-Range dependency. Variable 'self' used at line 215 is defined at line 204 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 215 is defined at line 207 and has a Short-Range dependency. Variable 'self' used at line 216 is defined at line 204 and has a Medium-Range dependency. Variable 'gain' used at line 216 is defined at line 208 and has a Short-Range dependency.
{}
{'Class Short-Range': 1, 'Variable Short-Range': 7, 'Variable Medium-Range': 2}
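The frequency dict above is a straight tally of the horizon sentences two fields earlier. A minimal sketch of how both the range label and the tally could be computed, assuming the gap between use line and definition line is bucketed at <=10 for Short-Range and <=50 for Medium-Range — the 10 is implied by this record itself (a gap of 10 is still Short, a gap of 11 is Medium), while the 50 is a guess, since the only Long-Range gaps in these records are around 200:

    from collections import Counter

    SHORT_MAX, MEDIUM_MAX = 10, 50  # assumed cutoffs, see note above

    def horizon(use_line, def_line):
        """Classify a dependency by the distance from use to definition."""
        dist = use_line - def_line
        if dist <= SHORT_MAX:
            return "Short-Range"
        if dist <= MEDIUM_MAX:
            return "Medium-Range"
        return "Long-Range"

    # (kind, use_line, def_line) triples taken from the record above.
    deps = [
        ("Class", 211, 203), ("Variable", 211, 204), ("Variable", 213, 204),
        ("Variable", 213, 205), ("Variable", 214, 204), ("Variable", 214, 206),
        ("Variable", 215, 204), ("Variable", 215, 207), ("Variable", 216, 204),
        ("Variable", 216, 208),
    ]
    freq = Counter(f"{kind} {horizon(u, d)}" for kind, u, d in deps)
    print(dict(freq))
    # -> {'Class Short-Range': 1, 'Variable Short-Range': 7, 'Variable Medium-Range': 2}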
infilling_python
GAN_model
221
223
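Each record's start_line/end_line pair (221 and 223 just above) appears to delimit the span the model must infill: 'before' is everything above the span, 'between' the span itself, 'after' the rest. A minimal sketch of that split, assuming 1-indexed inclusive bounds — consistent with this record, whose 'between' field below holds exactly three lines:

    def split_record(lines, start_line, end_line):
        """Split source lines into the before/between/after fields of an
        infilling record; bounds are 1-indexed and inclusive."""
        before = lines[:start_line - 1]           # context above the masked span
        between = lines[start_line - 1:end_line]  # the span to infill
        after = lines[end_line:]                  # context below the masked span
        return before, between, after

    # For this record: len(split_record(source_lines, 221, 223)[1]) == 3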
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ']
[' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)']
[' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' 
return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x 
= bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' 
final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 
512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'input_shape' used at line 221 is defined at line 219 and has a Short-Range dependency. Variable 'self' used at line 223 is defined at line 219 and has a Short-Range dependency. Library 'tf' used at line 223 is imported at line 16 and has a Long-Range dependency. Variable 'n_channels' used at line 223 is defined at line 221 and has a Short-Range dependency. Variable 'gain' used at line 223 is defined at line 216 and has a Short-Range dependency.
{}
{'Variable Short-Range': 4, 'Library Long-Range': 1}
infilling_python
GAN_model
223
223
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ']
[' self.scale = tf.math.sqrt(self.gain/n_channels)']
[' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' 
return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x 
= bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' 
final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 
512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 223 is defined at line 219 and has a Short-Range dependency. Library 'tf' used at line 223 is imported at line 16 and has a Long-Range dependency. Variable 'n_channels' used at line 223 is defined at line 221 and has a Short-Range dependency. Variable 'gain' used at line 223 is defined at line 216 and has a Short-Range dependency.
{}
{'Variable Short-Range': 3, 'Library Long-Range': 1}
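The Short/Medium/Long-Range labels in these horizon annotations are consistent with a simple distance rule between a symbol's definition line and its use line. A minimal sketch of that rule, assuming cutoffs of at most 10 lines for Short-Range and at most 30 for Medium-Range; the thresholds are inferred from the annotations visible here, not confirmed by the dataset:

# Hypothetical classifier for the dependency-horizon labels.
# Assumed cutoffs: distance <= 10 -> Short-Range, <= 30 -> Medium-Range,
# anything farther -> Long-Range.
def classify_horizon(def_line: int, use_line: int) -> str:
    distance = use_line - def_line
    if distance <= 10:
        return "Short-Range"
    if distance <= 30:
        return "Medium-Range"
    return "Long-Range"

# From the record above: 'self' used at line 223, defined at line 219.
assert classify_horizon(219, 223) == "Short-Range"
# 'tf' used at line 223, imported at line 16.
assert classify_horizon(16, 223) == "Long-Range"

Every annotation in this section agrees with these cutoffs (e.g. distances of 15 and 29 are labelled Medium-Range, 10 and below Short-Range).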
infilling_python
GAN_model
226
231
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(']
[" name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)']
[' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = 
U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return 
model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the 
previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 
'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'n_channels' used at line 227 is defined at line 221 and has a Short-Range dependency. Variable 'self' used at line 228 is defined at line 219 and has a Short-Range dependency. Variable 'units' used at line 228 is defined at line 213 and has a Medium-Range dependency. Variable 'self' used at line 229 is defined at line 219 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 229 is defined at line 206 and has a Medium-Range dependency. Library 'tf' used at line 231 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Short-Range': 3, 'Variable Medium-Range': 2, 'Library Long-Range': 1}
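The horizon_freq_analysis dict stored with each record appears to be a plain count of the (kind, range) pairs named in the horizon_categories_output sentence. A sketch of that derivation, assuming the exact sentence template used in these records; the regex and function name are illustrative, not part of the dataset:

import re
from collections import Counter

# Hypothetical derivation of the horizon frequency dict from the
# annotation text; the pattern mirrors the sentence template above.
def horizon_freq(annotation: str) -> dict:
    pairs = re.findall(
        r"(\w+) '[^']+' used at line \d+ is \w+ at line \d+ "
        r"and has a (Short|Medium|Long)-Range dependency",
        annotation,
    )
    return dict(Counter(f"{kind} {rng}-Range" for kind, rng in pairs))

Applied to the annotation above, this returns {'Variable Short-Range': 3, 'Variable Medium-Range': 2, 'Library Long-Range': 1}, matching the stored dict.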
infilling_python
GAN_model
234
238
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(']
[" name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)']
[' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in 
U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = 
U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = 
final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = 
from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 235 is defined at line 219 and has a Medium-Range dependency. Variable 'units' used at line 235 is defined at line 213 and has a Medium-Range dependency. Variable 'self' used at line 236 is defined at line 219 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 236 is defined at line 207 and has a Medium-Range dependency. Library 'tf' used at line 238 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Medium-Range': 4, 'Library Long-Range': 1}
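Each record's start_line and end_line bound the span to be infilled, and in the records visible here the between field holds exactly end_line - start_line + 1 source lines. A hypothetical consistency check, assuming records are loaded as dicts keyed by the field names in this dump, with between parsed into a list of source lines:

# Hypothetical sanity check: the infill span must match the line bounds.
def infill_span_ok(record: dict) -> bool:
    expected = record["end_line"] - record["start_line"] + 1
    return len(record["between"]) == expected

For example, the record above spans lines 234-238 (5 lines) and its between list holds 5 code lines; the record below spans 221-238 (18 lines) and carries 18.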
infilling_python
GAN_model
221
238
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ']
[' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)']
[' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in 
U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = 
U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = 
final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = 
from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'input_shape' used at line 221 is defined at line 219 and has a Short-Range dependency. Variable 'self' used at line 223 is defined at line 219 and has a Short-Range dependency. Library 'tf' used at line 223 is imported at line 16 and has a Long-Range dependency. Variable 'n_channels' used at line 223 is defined at line 221 and has a Short-Range dependency. Variable 'gain' used at line 223 is defined at line 216 and has a Short-Range dependency. Variable 'self' used at line 225 is defined at line 219 and has a Short-Range dependency. Variable 'n_channels' used at line 227 is defined at line 221 and has a Short-Range dependency. Variable 'self' used at line 228 is defined at line 219 and has a Short-Range dependency. Variable 'units' used at line 228 is defined at line 213 and has a Medium-Range dependency. Variable 'self' used at line 229 is defined at line 219 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 229 is defined at line 206 and has a Medium-Range dependency. Library 'tf' used at line 231 is imported at line 16 and has a Long-Range dependency. Variable 'self' used at line 233 is defined at line 219 and has a Medium-Range dependency. Variable 'self' used at line 235 is defined at line 219 and has a Medium-Range dependency. Variable 'units' used at line 235 is defined at line 213 and has a Medium-Range dependency. Variable 'self' used at line 236 is defined at line 219 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 236 is defined at line 207 and has a Medium-Range dependency. Library 'tf' used at line 238 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Short-Range': 8, 'Library Long-Range': 3, 'Variable Medium-Range': 7}
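
The first record above targets the EqualizedDense weight handling: the kernel is drawn from a unit normal in build and rescaled by sqrt(gain / n_channels) on every forward pass, while the bias is left unscaled. As a sanity check on that idea, here is a minimal NumPy sketch (not the dataset's TensorFlow layer; the function and variable names are illustrative) showing that the runtime scaling keeps the output variance near gain regardless of fan-in.

import numpy as np

def equalized_dense_forward(x, w, b, gain=2.0):
    # Runtime He-style scaling, mirroring EqualizedDense.call:
    # scale = sqrt(gain / n_channels); the bias is intentionally NOT scaled.
    fan_in = w.shape[0]
    scale = np.sqrt(gain / fan_in)
    return x @ (scale * w) + b

rng = np.random.default_rng(42)
x = rng.normal(size=(256, 512))   # unit-variance activations
w = rng.normal(size=(512, 1))     # unit-normal kernel, no pre-scaling
b = np.zeros(1)
y = equalized_dense_forward(x, w, b)
print(y.shape)                    # (256, 1)
print(y.var())                    # close to gain (= 2.0) for any fan_in

Keeping the draw at unit variance and applying the scale in the forward pass, rather than baking it into the initializer, is the equalized-learning-rate trick from the Progressive GAN paper cited in the code: adaptive optimizers then see comparably scaled parameters in every layer.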
infilling_python
GAN_model
251
251
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):']
[' return tf.matmul(inputs,self.scale*self.w) + self.b']
['', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = 
U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' 
center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = 
act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 251 is imported at line 16 and has a Long-Range dependency. Variable 'inputs' used at line 251 is defined at line 250 and has a Short-Range dependency. Variable 'self' used at line 251 is defined at line 250 and has a Short-Range dependency. Variable 'scale' used at line 251 is defined at line 223 and has a Medium-Range dependency. Variable 'w' used at line 251 is defined at line 225 and has a Medium-Range dependency. Variable 'b' used at line 251 is defined at line 233 and has a Medium-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2, 'Variable Medium-Range': 3}
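
Each record closes with a horizon_categories_output sentence per use-def pair plus a frequency dict. The bucket names suggest a simple line-distance rule, but the records never state the exact cutoffs, so the thresholds in the sketch below (<=10 lines for Short-Range, <=30 for Medium-Range) are assumptions; they happen to reproduce every pair quoted in the two analyses above.

def horizon_bucket(use_line, def_line, short_max=10, medium_max=30):
    # Assumed cutoffs; the dataset does not state them explicitly.
    distance = use_line - def_line
    if distance <= short_max:
        return "Short-Range"
    if distance <= medium_max:
        return "Medium-Range"
    return "Long-Range"

# (use, def) pairs copied verbatim from the annotations above.
assert horizon_bucket(251, 250) == "Short-Range"   # Variable 'inputs'
assert horizon_bucket(251, 223) == "Medium-Range"  # Variable 'scale', distance 28
assert horizon_bucket(228, 213) == "Medium-Range"  # Variable 'units', distance 15
assert horizon_bucket(251, 16) == "Long-Range"     # Library 'tf'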
infilling_python
GAN_model
285
288
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))']
[' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)']
[' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through 
the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = 
tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, 
input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Class 'EqualizedConv2DTranspose' used at line 285 is defined at line 141 and has a Long-Range dependency. Variable 'filters' used at line 285 is defined at line 281 and has a Short-Range dependency. Variable 'kernel_init' used at line 288 is defined at line 282 and has a Short-Range dependency. Variable 'inputs' used at line 288 is defined at line 284 and has a Short-Range dependency.
{}
{'Class Long-Range': 1, 'Variable Short-Range': 3}
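
The third record's target is the generator stem: an EqualizedConv2DTranspose with a (4,4) kernel and (4,4) strides that turns the 1x1x512 latent into a 4x4x512 feature map. The sketch below checks only that shape arithmetic with the raw TensorFlow op; it is not the dataset's custom layer, and the unscaled random kernel is illustrative. Note that tf.nn.conv2d_transpose expects a full NHWC output_shape, so the two-element tuple built inside the record's call method appears to omit the batch and channel dimensions.

import tensorflow as tf

tf.random.set_seed(42)
latent = tf.random.normal((1, 1, 1, 512))    # latent vector treated as a 1x1 "image"
kernel = tf.random.normal((4, 4, 512, 512))  # [kh, kw, out_channels, in_channels]
up = tf.nn.conv2d_transpose(
    latent, kernel,
    output_shape=(1, 4, 4, 512),             # batch, 1*stride, 1*stride, out_channels
    strides=(4, 4),
    padding='SAME')
print(up.shape)                              # (1, 4, 4, 512): spatial dims scale by the stride

With SAME padding a transpose convolution multiplies each spatial dimension by its stride, which is why a single (4,4)-stride layer suffices to reach the 4x4 base resolution that the subsequent 3x3 blocks refine.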
infilling_python
GAN_model
292
294
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ']
[' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)']
[' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output 
= U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = 
U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = 
tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'kernel_init' used at line 294 is defined at line 282 and has a Medium-Range dependency. Variable 'x' used at line 294 is defined at line 290 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Variable Short-Range': 1}
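The "Short-Range"/"Medium-Range" labels in the record above classify each variable use by how many lines separate it from its definition. Below is a minimal sketch of that classification under hypothetical cutoffs (the dataset's actual thresholds are not stated here); the record's own examples — a 4-line gap labelled Short-Range and a 12-line gap labelled Medium-Range — are at least consistent with the values chosen below.

# Sketch only: dependency_range and its cutoffs are assumptions,
# not the dataset's real labelling tool.
def dependency_range(def_line: int, use_line: int) -> str:
    distance = use_line - def_line
    if distance <= 10:       # assumed Short-Range cutoff
        return "Short-Range"
    if distance <= 30:       # assumed Medium-Range cutoff
        return "Medium-Range"
    return "Long-Range"

print(dependency_range(290, 294))  # -> Short-Range  (matches the record: x)
print(dependency_range(282, 294))  # -> Medium-Range (matches the record: kernel_init)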
infilling_python
GAN_model
316
322
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(']
[' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init']
['):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' 
U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = 
EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' 
kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
null
{}
null
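The EqualizedConv2D, EqualizedConv2DTranspose, and EqualizedDense layers that recur throughout these records all apply the same runtime scaling: weights are initialized from a unit normal and multiplied by sqrt(gain / fan_in) inside call(), while the bias is left unscaled. A small self-contained check of that factor for the 3x3 kernel over 512 input channels typical of the blocks above (a sketch of the arithmetic, not the records' Keras code):

import math

def equalized_scale(gain: int, kh: int, kw: int, in_channels: int) -> float:
    # fan_in = kernel_height * kernel_width * input channels,
    # exactly as computed in the layers' build() methods
    fan_in = kh * kw * in_channels
    return math.sqrt(gain / fan_in)

# The factor multiplied into self.w at call time; bias self.b is not scaled.
print(equalized_scale(2, 3, 3, 512))  # ~0.0208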
infilling_python
GAN_model
334
337
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,']
[' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ']
[' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = 
infilling_python
GAN_model
336
337
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),']
[' kernel_initializer=kernel_init)(x)))', ' ']
[' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = 
(input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, 
tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 
'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'kernel_init' used at line 336 is defined at line 322 and has a Medium-Range dependency. Variable 'x' used at line 336 is defined at line 333 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Variable Short-Range': 1}
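
The analysis sentences in the row above label each def-use pair by the line distance between definition and use. As a minimal sketch, assuming illustrative cutoffs of 10 and 50 lines (the dump does not state its thresholds; these values are merely consistent with the distances visible in these rows: 3 reads as Short-Range, 14 as Medium-Range, 322 and 351 as Long-Range), the bucketing could be expressed as follows. The helper name horizon_category is hypothetical.

# Hypothetical helper (not the dataset's own tooling): bucket a def-use
# pair by line distance. The cutoffs 10 and 50 are assumptions chosen to
# agree with the distances visible in these rows; the real ones may differ.
def horizon_category(def_line: int, use_line: int,
                     short_max: int = 10, medium_max: int = 50) -> str:
    distance = use_line - def_line
    if distance <= short_max:
        return "Short-Range"
    if distance <= medium_max:
        return "Medium-Range"
    return "Long-Range"

# The two dependencies described in the row above:
print(horizon_category(333, 336))  # Variable 'x'           -> Short-Range
print(horizon_category(322, 336))  # Variable 'kernel_init' -> Medium-Range
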
infilling_python
GAN_model
359
365
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,']
[' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ']
[' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not 
center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 
'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 362 is imported at line 11 and has a Long-Range dependency. Variable 'filters' used at line 362 is defined at line 359 and has a Short-Range dependency. Function 'minibatch_stddev_layer' used at line 364 is defined at line 42 and has a Long-Range dependency. Variable 'inputs' used at line 364 is defined at line 362 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2, 'Function Long-Range': 1}
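
The frequency dict above is a tally of (kind, range) pairs over the sentences in the corresponding analysis field. A small sketch, assuming the sentence template used throughout these rows ("<Kind> '<name>' used at line N ... has a <Range> dependency."); the helper name horizon_freq and the regex are illustrative, not the dataset's own code. Run on the analysis text of this row, it reproduces the dict printed above.

# Hypothetical tally helper: recompute the frequency dict from the
# analysis sentences. The regex mirrors the sentence template seen in
# these rows; it is an assumption about the format, not a documented API.
import re
from collections import Counter

SENTENCE = re.compile(
    r"(Variable|Function|Library|Class)\s+'[^']+'\s+used at line \d+.*?"
    r"has a (Short-Range|Medium-Range|Long-Range) dependency\."
)

def horizon_freq(analysis: str) -> dict:
    return dict(Counter(f"{kind} {rng}" for kind, rng in SENTENCE.findall(analysis)))

text = ("Library 'layers' used at line 362 is imported at line 11 and has a "
        "Long-Range dependency. Variable 'filters' used at line 362 is defined "
        "at line 359 and has a Short-Range dependency. Function "
        "'minibatch_stddev_layer' used at line 364 is defined at line 42 and "
        "has a Long-Range dependency. Variable 'inputs' used at line 364 is "
        "defined at line 362 and has a Short-Range dependency.")

print(horizon_freq(text))
# {'Library Long-Range': 1, 'Variable Short-Range': 2, 'Function Long-Range': 1}
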
infilling_python
GAN_model
367
367
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ']
[' kernel_size=(3,3), ']
after: ['…GAN_model source from the tail of U_gen_top_init (its 3x3 and 4x4 equalized convs, activations, and Model construction) through the end of the file: the U_gen_top_init unit test, U_gen_top_add, U_connect, U_rgb_fadein, and the discriminator blocks (final_block_disc, additional_block_disc, connect_model, from_rgb_fadein) with their unit tests; duplicate of the context repeated in the rows below…']
reason_categories_output: []
horizon_categories_output: null
reason_freq_analysis: {}
horizon_freq_analysis: null
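Each row carries the columns from the schema at the top of the dump: the task header (task_type, code_task, start_line, end_line), the split source (before, between, after), and the analysis fields with their frequency tallies. A quick consistency check is to splice between back into its context and compare line counts. A minimal sketch, assuming a row already parsed into a dict with those field names and 1-indexed, inclusive start_line/end_line; the abbreviated literal below is hypothetical, modeled on the next row:

def splice(row):
    # between must cover exactly start_line..end_line of the original file
    n_target = row["end_line"] - row["start_line"] + 1
    assert len(row["between"]) == n_target
    # before should end at start_line - 1 of the original file
    assert len(row["before"]) == row["start_line"] - 1
    return row["before"] + row["between"] + row["after"]

row = {
    "task_type": "infilling_python",
    "code_task": "GAN_model",
    "start_line": 375,
    "end_line": 380,
    "before": ["<source line>"] * 374,   # full context elided here
    "between": ["<line 375>", "<line 376>", "<line 377>",
                "<line 378>", "<line 379>", "<line 380>"],
    "after": ["<line 381>"],             # remainder of the file
}
print(len(splice(row)))  # 381 lines in this abbreviated example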
task_type: infilling_python
code_task: GAN_model
start_line: 375
end_line: 380
before: ['…GAN_model source lines 1-374: imports, activation/normalization helpers, minibatch_stddev_layer, the EqualizedConv2D / EqualizedConv2DTranspose / EqualizedDense layers with their unit tests, U_gen_bottom_init, U_gen_bottom_add, and the body of U_gen_top_init up to its 4x4 strided conv; duplicate of the file context shown above…']
between:
[' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)']
after: ['…GAN_model source from line 381 to the end of the file: return model, the U_gen_top_init unit test, U_gen_top_add, U_connect, U_rgb_fadein, and the discriminator blocks (final_block_disc, additional_block_disc, connect_model, from_rgb_fadein) with their unit tests; duplicate of the context shown above…']
reason_categories_output: []
horizon_categories_output: Variable 'kernel_init' used at line 375 is defined at line 360 and has a Medium-Range dependency. Variable 'x' used at line 375 is defined at line 370 and has a Short-Range dependency. Variable 'act_func' used at line 378 is defined at line 358 and has a Medium-Range dependency. Variable 'x' used at line 378 is defined at line 372 and has a Short-Range dependency. Library 'tf' used at line 380 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 380 is defined at line 362 and has a Medium-Range dependency. Variable 'x' used at line 380 is defined at line 378 and has a Short-Range dependency.
reason_freq_analysis: {}
horizon_freq_analysis: {'Variable Medium-Range': 3, 'Variable Short-Range': 3, 'Library Long-Range': 1}
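The horizon_categories_output sentences classify each use/definition pair by the distance between the two lines. In this row, distances of 2-6 lines are labeled Short-Range, 15-20 lines Medium-Range, and 365 lines Long-Range; cutoffs of 10 and 30 lines fit these labels but are an assumption, not something the dump states. A sketch of the bucketing plus the tally that reproduces this row's horizon_freq_analysis:

from collections import Counter

# Assumed cutoffs: <=10 lines Short, <=30 Medium, else Long. These fit the
# distances in this row but are not confirmed by the dump itself.
def horizon_bucket(use_line, def_line, short_max=10, medium_max=30):
    dist = abs(use_line - def_line)
    if dist <= short_max:
        return "Short-Range"
    if dist <= medium_max:
        return "Medium-Range"
    return "Long-Range"

# (kind, name, use_line, def_line) tuples transcribed from this row
deps = [
    ("Variable", "kernel_init", 375, 360),
    ("Variable", "x",           375, 370),
    ("Variable", "act_func",    378, 358),
    ("Variable", "x",           378, 372),
    ("Library",  "tf",          380, 15),
    ("Variable", "inputs",      380, 362),
    ("Variable", "x",           380, 378),
]

freq = Counter(f"{kind} {horizon_bucket(u, d)}" for kind, _, u, d in deps)
print(dict(freq))
# {'Variable Medium-Range': 3, 'Variable Short-Range': 3, 'Library Long-Range': 1}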
task_type: infilling_python
code_task: GAN_model
start_line: 380
end_line: 380
before: ['…GAN_model source lines 1-379: the same context as the previous row, extended through U_gen_top_init's final x = act_func(x) and a trailing blank line…']
between:
[' model = tf.keras.models.Model(inputs, x)']
after: ['…GAN_model source from line 381 to the end of the file; identical to the previous row's after context…']
reason_categories_output: []
horizon_categories_output: Library 'tf' used at line 380 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 380 is defined at line 362 and has a Medium-Range dependency. Variable 'x' used at line 380 is defined at line 378 and has a Short-Range dependency.
reason_freq_analysis: {}
horizon_freq_analysis: {'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}
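horizon_freq_analysis is derivable from the prose field alone. A regex sketch that reconstructs this row's tally from its horizon_categories_output string; the sentence template is inferred from the rows in this dump, and other dependency kinds (e.g. Function or Class) may appear elsewhere in the dataset:

import re
from collections import Counter

PATTERN = re.compile(
    r"(?P<kind>\w+) '[^']+' used at line \d+ is "
    r"(?:defined at line|imported at line) \d+ and has a "
    r"(?P<range>Short|Medium|Long)-Range dependency\."
)

horizon_text = (
    "Library 'tf' used at line 380 is imported at line 15 and has a "
    "Long-Range dependency. Variable 'inputs' used at line 380 is defined "
    "at line 362 and has a Medium-Range dependency. Variable 'x' used at "
    "line 380 is defined at line 378 and has a Short-Range dependency."
)

freq = Counter(
    f"{m['kind']} {m['range']}-Range" for m in PATTERN.finditer(horizon_text)
)
print(dict(freq))
# {'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}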
task_type: infilling_python
code_task: GAN_model
start_line: 380
end_line: 381
before: ['…GAN_model source lines 1-379; identical to the previous row's before context…']
between:
[' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' 
fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 
'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 380 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 380 is defined at line 362 and has a Medium-Range dependency. Variable 'x' used at line 380 is defined at line 378 and has a Short-Range dependency. Variable 'model' used at line 381 is defined at line 380 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 2}
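Every custom layer exercised by these records applies the same equalized learning-rate trick: kernels are stored as N(0, 1) draws and multiplied at call time by sqrt(gain / fan_in), while the bias is left unscaled. A minimal NumPy sketch of that scaling, assuming a hypothetical equalized_dense helper rather than the records' TensorFlow layers:

import numpy as np

rng = np.random.default_rng(42)

def equalized_dense(x, units, gain=2.0):
    # Per-layer constant sqrt(gain / fan_in), matching the layers in the records.
    n_channels = x.shape[-1]
    scale = np.sqrt(gain / n_channels)
    w = rng.standard_normal((n_channels, units))  # kernel stored as N(0, 1)
    b = np.zeros(units)
    return x @ (scale * w) + b  # scale the kernel only, never the bias

x = rng.standard_normal((4, 512))
print(equalized_dense(x, 1).shape)  # (4, 1)

Storing unit-variance weights and applying the He-style constant at call time keeps every parameter at a comparable dynamic range for the optimizer, which is the point of the equalization.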
infilling_python
GAN_model
405
417
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):']
[' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)']
[' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' 
kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = 
layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 405 is imported at line 11 and has a Long-Range dependency. Variable 'image_shape' used at line 405 is defined at line 402 and has a Short-Range dependency. Variable 'filters1' used at line 405 is defined at line 400 and has a Short-Range dependency. Variable 'act_func' used at line 407 is defined at line 398 and has a Short-Range dependency. Class 'EqualizedConv2D' used at line 407 is defined at line 83 and has a Long-Range dependency. Variable 'filters1' used at line 407 is defined at line 400 and has a Short-Range dependency. Variable 'kernel_init' used at line 410 is defined at line 403 and has a Short-Range dependency. Variable 'inputs' used at line 410 is defined at line 405 and has a Short-Range dependency. Variable 'act_func' used at line 411 is defined at line 398 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 411 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 411 is defined at line 401 and has a Short-Range dependency. Variable 'kernel_init' used at line 414 is defined at line 403 and has a Medium-Range dependency. Variable 'x' used at line 414 is defined at line 407 and has a Short-Range dependency. Variable 'downsample_func' used at line 415 is defined at line 399 and has a Medium-Range dependency. Variable 'x' used at line 415 is defined at line 411 and has a Short-Range dependency. Library 'tf' used at line 417 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 417 is defined at line 405 and has a Medium-Range dependency. Variable 'x' used at line 417 is defined at line 415 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 10, 'Class Long-Range': 2, 'Variable Medium-Range': 4}
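The span reconstructed in this record is the two-convolution block shared by U_gen_top_add and additional_block_disc: 3x3 conv, activation, 3x3 conv, activation, then a 2x average-pool downsample, wrapped in a functional Model. A self-contained Keras sketch of the same shape, with stock Conv2D standing in for the custom EqualizedConv2D (conv_block is a hypothetical name, not from the record):

import tensorflow as tf
from tensorflow.keras import layers

def conv_block(filters1, filters2, image_shape):
    inputs = layers.Input(shape=(image_shape[0], image_shape[1], filters1))
    x = layers.LeakyReLU(0.2)(layers.Conv2D(filters1, 3, padding="same")(inputs))
    x = layers.LeakyReLU(0.2)(layers.Conv2D(filters2, 3, padding="same")(x))
    x = layers.AveragePooling2D(pool_size=2, strides=2, padding="same")(x)  # halve H and W
    return tf.keras.Model(inputs, x)

block = conv_block(512, 512, (8, 8))
print(block(tf.random.normal((1, 8, 8, 512))).shape)  # (1, 4, 4, 512)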
infilling_python
GAN_model
405
410
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):']
[' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))']
[' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 
'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, 
', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 405 is imported at line 11 and has a Long-Range dependency.
Variable 'image_shape' used at line 405 is defined at line 402 and has a Short-Range dependency.
Variable 'filters1' used at line 405 is defined at line 400 and has a Short-Range dependency.
Variable 'act_func' used at line 407 is defined at line 398 and has a Short-Range dependency.
Class 'EqualizedConv2D' used at line 407 is defined at line 83 and has a Long-Range dependency.
Variable 'filters1' used at line 407 is defined at line 400 and has a Short-Range dependency.
Variable 'kernel_init' used at line 410 is defined at line 403 and has a Short-Range dependency.
Variable 'inputs' used at line 410 is defined at line 405 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 6, 'Class Long-Range': 1}
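The frequency tally above can be reproduced mechanically from the per-dependency statements. The sketch below is a minimal, assumption-laden reconstruction: the Short/Medium/Long-Range thresholds (def-use distance <= 10, <= 30, else Long) are inferred from the tallies in this file, not taken from any documented spec, and classify_horizon is a hypothetical helper name.

from collections import Counter

# Hypothetical helper; thresholds are assumptions consistent with the
# recorded tallies (e.g. distance 10 -> Short-Range, 11 -> Medium-Range).
def classify_horizon(use_line: int, def_line: int) -> str:
    distance = abs(use_line - def_line)
    if distance <= 10:
        return "Short-Range"
    if distance <= 30:
        return "Medium-Range"
    return "Long-Range"

# (kind, use_line, def_line) triples copied from the record above.
deps = [
    ("Library", 405, 11), ("Variable", 405, 402), ("Variable", 405, 400),
    ("Variable", 407, 398), ("Class", 407, 83), ("Variable", 407, 400),
    ("Variable", 410, 403), ("Variable", 410, 405),
]

freq = Counter(f"{kind} {classify_horizon(use, d)}" for kind, use, d in deps)
print(dict(freq))
# {'Library Long-Range': 1, 'Variable Short-Range': 6, 'Class Long-Range': 1}

The same assumed thresholds also reproduce the Medium-Range tallies in the later records, whose def-use distances of 11-16 lines fall between the Short and Long cutoffs.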
infilling_python
GAN_model
410
410
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),']
[' kernel_initializer=kernel_init)(inputs))']
[' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 
'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, 
', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'kernel_init' used at line 410 is defined at line 403 and has a Short-Range dependency.
Variable 'inputs' used at line 410 is defined at line 405 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
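The EqualizedConv2D and EqualizedDense layers that dominate these payloads all rely on one runtime trick: weights are stored at unit variance and rescaled by sqrt(gain/fan_in) on every forward pass, with the bias left unscaled. A minimal NumPy sketch of just that scaling step, assuming the 3x3x512 fan-in used throughout the payload code (the array shapes are illustrative, not the layers' actual TF variables):

import numpy as np

rng = np.random.default_rng(42)
gain = 2.0
kernel_h, kernel_w, in_channels = 3, 3, 512
fan_in = kernel_h * kernel_w * in_channels          # 4608

w = rng.standard_normal((fan_in, 16))               # stored at stddev ~1
scale = (gain / fan_in) ** 0.5                      # applied at call time
effective_w = scale * w                             # bias would stay unscaled

print(round(float(effective_w.std()), 4))           # ~0.0208, He-style stddev

Because the scale is applied at call time rather than baked into the initializer, every weight sees the same effective per-step update magnitude regardless of its layer's fan-in, which is the point of the equalized learning-rate scheme.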
infilling_python
GAN_model
411
417
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))']
[' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)']
[' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' 
kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = 
layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'act_func' used at line 411 is defined at line 398 and has a Medium-Range dependency.
Class 'EqualizedConv2D' used at line 411 is defined at line 83 and has a Long-Range dependency.
Variable 'filters2' used at line 411 is defined at line 401 and has a Short-Range dependency.
Variable 'kernel_init' used at line 414 is defined at line 403 and has a Medium-Range dependency.
Variable 'x' used at line 414 is defined at line 407 and has a Short-Range dependency.
Variable 'downsample_func' used at line 415 is defined at line 399 and has a Medium-Range dependency.
Variable 'x' used at line 415 is defined at line 411 and has a Short-Range dependency.
Library 'tf' used at line 417 is imported at line 15 and has a Long-Range dependency.
Variable 'inputs' used at line 417 is defined at line 405 and has a Medium-Range dependency.
Variable 'x' used at line 417 is defined at line 415 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 4, 'Class Long-Range': 1, 'Variable Short-Range': 4, 'Library Long-Range': 1}
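Both U_rgb_fadein and from_rgb_fadein in these payloads blend an old low-resolution path h into a newly grown block's path x with the same convex combination. A self-contained sketch of only that blending step follows; the alpha value and tensor shapes are illustrative, not taken from the records:

import tensorflow as tf

alpha = tf.constant(0.3)                 # 0.0 -> old path only, 1.0 -> new path only
h = tf.random.normal((1, 4, 4, 512))     # skip branch: downsample + 1x1 from-RGB
x = tf.random.normal((1, 4, 4, 512))     # branch through the newly added block
fade_in = (1 - alpha) * h + alpha * x    # the payload's fade-in formula
print(fade_in.shape)                     # (1, 4, 4, 512)

Ramping alpha from 0 to 1 over training lets the new block take over gradually, which is why the unit tests in the payloads drive each model with an explicit alpha input alongside the image tensor.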
infilling_python
GAN_model
434
434
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ']
[' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ']
[' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer 
in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 
512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 434 is imported at line 11 and has a Long-Range dependency. Variable 'input_shape' used at line 434 is defined at line 432 and has a Short-Range dependency. Variable 'filters' used at line 434 is defined at line 432 and has a Short-Range dependency. (The masked span and its dependency sources are reconstructed in the readability note after this record.)
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2}
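
Readability note (editorial, not part of the record): the masked span for this record, reassembled verbatim from the `before` and `between` fields above. Line numbers follow the record's own source-file indexing, and the snippet is an excerpt for orientation, not standalone code.

    from tensorflow.keras import layers    # labeled line 11: the Library Long-Range source

    def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):    # line 432: defines input_shape and filters
        inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters))    # line 434: the masked target

This matches the label counts above: one Library Long-Range dependency (layers) plus two Variable Short-Range dependencies (input_shape and filters).
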
infilling_python
GAN_model
437
438
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ']
[' x = top(inputs)', ' x = bottom(x)']
['', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' 
final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 
512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 437}, {'reason_category': 'If Body', 'usage_line': 438}]
Variable 'top' used at line 437 is defined at line 432 and has a Short-Range dependency. Variable 'inputs' used at line 437 is defined at line 434 and has a Short-Range dependency. Variable 'bottom' used at line 438 is defined at line 432 and has a Short-Range dependency. Variable 'x' used at line 438 is defined at line 437 and has a Short-Range dependency. (See the readability note after this record for the reconstructed if-branch.)
{'If Body': 2}
{'Variable Short-Range': 4}
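
Readability note (editorial, not part of the record): the masked lines 437-438 sit in the if-branch of U_connect; the excerpt below is reassembled verbatim from the `before` and `between` fields above, with the labeled dependency sources marked.

    def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):    # line 432: defines top and bottom
        inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters))    # line 434: defines inputs
        if center == None:    # the If Body context recorded in reason_categories_output
            x = top(inputs)    # line 437: masked; uses top (line 432) and inputs (line 434)
            x = bottom(x)    # line 438: masked; uses bottom (line 432) and x (line 437)

All four labeled dependencies are Variable Short-Range, in agreement with the horizon_freq_analysis entry.
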
infilling_python
GAN_model
443
443
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)']
[' x = x+h']
[' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 
'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in 
from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'Else Reasoning', 'usage_line': 443}]
Variable 'h' used at line 443 is defined at line 441 and has a Short-Range dependency. Variable 'x' used at line 443 is defined at line 442 and has a Short-Range dependency. (See the readability note after this record for the reconstructed else-branch.)
{'Else Reasoning': 1}
{'Variable Short-Range': 2}
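
Readability note (editorial, not part of the record): the masked line 443 sits in the else-branch of U_connect; the excerpt below is reassembled verbatim from the `before` and `between` fields above.

        else:    # the Else Reasoning context recorded in reason_categories_output
            h = top(inputs)    # line 441: defines h
            x, _ = center(h)    # line 442: defines x
            x = x+h    # line 443: the masked target; uses h (line 441) and x (line 442)

Both labeled dependencies (h and x) are Variable Short-Range, matching the horizon_freq_analysis entry.
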
infilling_python
GAN_model
446
446
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ']
[' model = tf.keras.models.Model(inputs, x)']
[' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed 
through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 
'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 446 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 446 is defined at line 434 and has a Medium-Range dependency. Variable 'x' used at line 446 is defined at line 444 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}
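The Short/Medium/Long-Range labels in the record above appear to follow the line distance between where a symbol is defined (or imported) and where it is used. A minimal sketch of that classification is below; the thresholds (<=10 lines = Short, <=30 = Medium, otherwise Long) are an assumption inferred from the distances visible in these records, not a documented rule of the dataset.

def classify_dependency(def_line: int, use_line: int) -> str:
    # Distance in source lines between a symbol's definition and its use.
    distance = use_line - def_line
    if distance <= 10:
        return "Short-Range"
    if distance <= 30:
        return "Medium-Range"
    return "Long-Range"

# Consistent with the record above: 'tf' imported at line 15 and used at
# line 446 is Long-Range; 'inputs' (defined 434, used 446) is Medium-Range;
# 'x' (defined 444, used 446) is Short-Range.
assert classify_dependency(15, 446) == "Long-Range"
assert classify_dependency(434, 446) == "Medium-Range"
assert classify_dependency(444, 446) == "Short-Range"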
infilling_python
GAN_model
479
481
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ']
[' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))']
[' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks 
and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'kernel_init' used at line 481 is defined at line 472 and has a Short-Range dependency. Variable 'inputs' used at line 481 is defined at line 475 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
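The frequency dict above ({'Variable Short-Range': 2}) is simply a count of the "<Kind> <Range>" pairs occurring in the preceding dependency statements. A hedged sketch of that aggregation follows; the statement format is inferred from the records in this dump, and the regex and function name are illustrative assumptions, not the dataset's own tooling.

import re
from collections import Counter

# One dependency statement per match, e.g.
# "Variable 'inputs' used at line 481 is defined at line 475 and has a
#  Short-Range dependency."
STMT = re.compile(r"(Variable|Library|Class|Function) '[^']+' used at line \d+ "
                  r".*? has a (Short|Medium|Long)-Range dependency\.")

def freq_analysis(text: str) -> dict:
    # Tally each (kind, range) pair into labels like 'Variable Short-Range'.
    return dict(Counter(f"{kind} {rng}-Range" for kind, rng in STMT.findall(text)))

example = ("Variable 'kernel_init' used at line 481 is defined at line 472 and "
           "has a Short-Range dependency. Variable 'inputs' used at line 481 is "
           "defined at line 475 and has a Short-Range dependency.")
print(freq_analysis(example))  # {'Variable Short-Range': 2}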
infilling_python
GAN_model
478
482
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ']
[' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)']
[' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer 
shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'act_func' used at line 478 is defined at line 467 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 478 is defined at line 83 and has a Long-Range dependency. Variable 'filters1' used at line 478 is defined at line 464 and has a Medium-Range dependency. Variable 'kernel_init' used at line 481 is defined at line 472 and has a Short-Range dependency. Variable 'inputs' used at line 481 is defined at line 475 and has a Short-Range dependency. Variable 'top' used at line 482 is defined at line 464 and has a Medium-Range dependency.
{}
{'Variable Medium-Range': 3, 'Class Long-Range': 1, 'Variable Short-Range': 2}
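For reference, the target span of the row above (around source lines 478-482 of GAN_model, inside U_rgb_fadein, per the dependency annotations) rendered as plain Python. A minimal runnable sketch, not the dataset's ground truth: act_func, top, kernel_init, and filters1 are hypothetical stand-ins, and a stock Keras Conv2D replaces the file's EqualizedConv2D so the snippet is self-contained.

import tensorflow as tf
from tensorflow.keras import layers

# Stand-ins (hypothetical) for names defined elsewhere in the quoted source file.
act_func = tf.keras.layers.LeakyReLU(0.2)                  # plays the role of act_func
top = lambda t: t                                          # placeholder for the top sub-model
kernel_init = tf.keras.initializers.RandomNormal(seed=42)
filters1 = 512

inputs = layers.Input(shape=(4, 4, 3))
# The annotated span: a 1x1 "from RGB" convolution, activation, then the top block.
x = act_func(layers.Conv2D(filters1,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=kernel_init)(inputs))
x = top(x)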
infilling_python
GAN_model
486
489
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ']
[' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))']
['', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 486}, {'reason_category': 'If Body', 'usage_line': 487}, {'reason_category': 'If Body', 'usage_line': 488}, {'reason_category': 'If Body', 'usage_line': 489}]
Variable 'act_func' used at line 486 is defined at line 467 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 486 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 486 is defined at line 471 and has a Medium-Range dependency. Variable 'kernel_init' used at line 489 is defined at line 472 and has a Medium-Range dependency. Variable 'h' used at line 489 is defined at line 484 and has a Short-Range dependency.
{'If Body': 4}
{'Variable Medium-Range': 3, 'Class Long-Range': 1, 'Variable Short-Range': 1}
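For reference, the row above targets the fade-in branch of U_rgb_fadein (source lines 486-489): the RGB input is downsampled, then projected to filters2 channels with a 1x1 convolution. A minimal sketch with hypothetical stand-ins; stock Keras layers substitute for the file's EqualizedConv2D and pooling helper so it runs standalone.

import tensorflow as tf
from tensorflow.keras import layers

# Hypothetical stand-ins for names from the quoted source file.
act_func = tf.keras.layers.LeakyReLU(0.2)
downsample_func = layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')
kernel_init = tf.keras.initializers.RandomNormal(seed=42)
filters2 = 512

inputs = layers.Input(shape=(8, 8, 3))
h = downsample_func(inputs)           # from the surrounding `before` context
# The annotated span (lines 486-489): project the downsampled RGB input.
h = act_func(layers.Conv2D(filters2,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=kernel_init)(h))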
infilling_python
GAN_model
492
492
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x']
[' w, _ = center(fade_in)']
[' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in 
additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 492}]
Variable 'center' used at line 492 is defined at line 465 and has a Medium-Range dependency. Variable 'fade_in' used at line 492 is defined at line 491 and has a Short-Range dependency.
{'If Body': 1}
{'Variable Medium-Range': 1, 'Variable Short-Range': 1}
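For reference, the row above targets source line 492, where the blended fade-in tensor is fed through the center sub-model. A minimal eager-mode sketch; center is a hypothetical stand-in that returns a pair, matching the tuple unpacking in the target span.

import tensorflow as tf

# Hypothetical stand-in for the center sub-model; like the source's center
# model it returns a pair, so the tuple unpacking below works.
center = lambda t: (t, None)

alpha = 0.5                                  # fade-in coefficient
h = tf.random.normal((1, 4, 4, 512))         # downsampled "from RGB" path
x = tf.random.normal((1, 4, 4, 512))         # learned path
fade_in = (1 - alpha) * h + alpha * x        # blend the two paths (source line 491)
w, _ = center(fade_in)                       # the annotated span (source line 492)
x = x + w                                    # residual add, from the `after` context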
infilling_python
GAN_model
498
501
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:']
[' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)']
[' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated 
block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 498}, {'reason_category': 'If Body', 'usage_line': 499}, {'reason_category': 'If Body', 'usage_line': 500}, {'reason_category': 'If Body', 'usage_line': 501}]
Class 'EqualizedConv2D' used at line 498 is defined at line 83 and has a Long-Range dependency. Variable 'kernel_init' used at line 501 is defined at line 472 and has a Medium-Range dependency. Variable 'x' used at line 501 is defined at line 495 and has a Short-Range dependency.
{'If Body': 4}
{'Class Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}
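The record above asks the model to restore source lines 498-501 of U_rgb_fadein: the `if` branch taken when the generator output is already at the base 4x4 resolution, where a single 1x1 to-RGB convolution is applied to `x` before the tanh that follows in the `after` field. A minimal runnable sketch of that branch, with tf.keras.layers.Conv2D standing in for the dataset's custom EqualizedConv2D (a stand-in chosen only so the snippet is self-contained, not the layer the record actually uses):

import tensorflow as tf

# Stand-in for EqualizedConv2D(3, kernel_size=(1,1), strides=(1,1), ...):
# a plain 1x1 convolution projecting 512 feature maps down to 3 RGB channels.
to_rgb = tf.keras.layers.Conv2D(filters=3, kernel_size=1, strides=1)

x = tf.random.normal((1, 4, 4, 512))  # base-resolution generator features
fade_in = to_rgb(x)                   # the infilled span: project to RGB
fade_in = tf.math.tanh(fade_in)       # the next line in the `after` context
print(fade_in.shape)                  # (1, 4, 4, 3)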
infilling_python
GAN_model
503
503
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ']
[' fade_in = tf.math.tanh(fade_in)']
[' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 
'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 503}]
Library 'tf' used at line 503 is imported at line 15 and has a Long-Range dependency. Variable 'fade_in' used at line 503 is defined at line 498 and has a Short-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
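The single infilled line in this record, `fade_in = tf.math.tanh(fade_in)`, bounds the to-RGB output to (-1, 1), the usual range for GAN image tensors. A quick standalone check of the squashing behaviour:

import tensorflow as tf

rgb = tf.constant([-5.0, -1.0, 0.0, 1.0, 5.0])
print(tf.math.tanh(rgb).numpy())
# approximately [-0.9999 -0.7616  0.      0.7616  0.9999] -- all inside (-1, 1)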
infilling_python
GAN_model
506
508
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ']
[' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)']
[' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = 
additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'Else Reasoning', 'usage_line': 506}, {'reason_category': 'Else Reasoning', 'usage_line': 507}, {'reason_category': 'Else Reasoning', 'usage_line': 508}]
Variable 'kernel_init' used at line 508 is defined at line 472 and has a Long-Range dependency. Variable 'upsample' used at line 508 is defined at line 495 and has a Medium-Range dependency.
{'Else Reasoning': 3}
{'Variable Long-Range': 1, 'Variable Medium-Range': 1}
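Across these records, the horizon labels appear to track the line distance between a symbol's use and its definition: distances of 5 (503-498) and 6 (501-495) are labelled Short-Range, 13 (508-495) and 29 (501-472) Medium-Range, and 36 (508-472) or the imports near the top of the file Long-Range. Below is a sketch of a classifier consistent with those labels; the exact cutoffs (10 and 30) are an inference from the examples shown here, not something the dump itself states:

def horizon_label(use_line: int, def_line: int) -> str:
    # Assumed cutoffs, inferred from the records above: a distance of at
    # most 10 lines reads as Short-Range, at most 30 as Medium-Range,
    # and anything farther as Long-Range.
    distance = use_line - def_line
    if distance <= 10:
        return 'Short-Range'
    if distance <= 30:
        return 'Medium-Range'
    return 'Long-Range'

print(horizon_label(508, 472))  # Long-Range, matching this record
print(horizon_label(508, 495))  # Medium-Range, matching this record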
infilling_python
GAN_model
510
513
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ']
between: ['        x = EqualizedConv2D(3, ', '                        kernel_size=(1,1), ', '                        strides=(1,1),', '                        kernel_initializer=kernel_init)(x)']
after:
[' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def 
connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
reason_categories_output: [{'reason_category': 'Else Reasoning', 'usage_line': 510}, {'reason_category': 'Else Reasoning', 'usage_line': 511}, {'reason_category': 'Else Reasoning', 'usage_line': 512}, {'reason_category': 'Else Reasoning', 'usage_line': 513}]
horizon_categories_output: Class 'EqualizedConv2D' used at line 510 is defined at line 83 and has a Long-Range dependency. Variable 'kernel_init' used at line 513 is defined at line 472 and has a Long-Range dependency. Variable 'x' used at line 513 is defined at line 495 and has a Medium-Range dependency.
reason_freq_analysis: {'Else Reasoning': 4}
horizon_freq_analysis: {'Class Long-Range': 1, 'Variable Long-Range': 1, 'Variable Medium-Range': 1}
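The between span of this record (source lines 510-513) is the generator's to-RGB projection: a 1x1 equalized convolution that collapses the upsampled feature map to three image channels ahead of the final tanh fade-in blend. As a minimal, self-contained sketch of that step, the snippet below substitutes a stock tf.keras Conv2D for the dataset's custom EqualizedConv2D (which additionally rescales its kernel at call time); the variable names and shapes here are illustrative, not part of the record.

import tensorflow as tf

# Stand-in for the record's EqualizedConv2D to-RGB step: a 1x1 convolution
# that mixes channels pointwise and collapses them down to R, G, B.
to_rgb = tf.keras.layers.Conv2D(
    filters=3,
    kernel_size=(1, 1),
    strides=(1, 1),
    kernel_initializer=tf.keras.initializers.RandomNormal(seed=42),
)

features = tf.random.normal((1, 4, 4, 512))  # NHWC feature map (illustrative shape)
rgb = tf.math.tanh(to_rgb(features))         # squash into the [-1, 1] image range
print(rgb.shape)                             # (1, 4, 4, 3)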
task_type: infilling_python
code_task: GAN_model
start_line: 515
end_line: 515
before:
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ']
between: ['        fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)']
after:
[' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = 
layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
reason_categories_output: [{'reason_category': 'Else Reasoning', 'usage_line': 515}]
horizon_categories_output: Library 'tf' used at line 515 is imported at line 15 and has a Long-Range dependency. Variable 'alpha' used at line 515 is defined at line 476 and has a Long-Range dependency. Variable 'upsample' used at line 515 is defined at line 505 and has a Short-Range dependency. Variable 'x' used at line 515 is defined at line 510 and has a Short-Range dependency.
reason_freq_analysis: {'Else Reasoning': 1}
horizon_freq_analysis: {'Library Long-Range': 1, 'Variable Long-Range': 1, 'Variable Short-Range': 2}
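The single infilled line of this record (source line 515) is the progressive-growing fade-in: as alpha ramps from 0 to 1 during training, the output crossfades from the upsampled low-resolution branch to the newly added high-resolution branch. Below is a self-contained numerical sketch of the blend; the tensor names mirror the record, but the shapes and the alpha value are illustrative.

import tensorflow as tf

alpha = tf.constant(0.3)                   # fade-in progress in [0, 1]
upsample = tf.random.normal((1, 8, 8, 3))  # old branch, upsampled to the new size
x = tf.random.normal((1, 8, 8, 3))         # new high-resolution branch

# Convex combination of the two branches, squashed to [-1, 1] as in the record.
fade_in = tf.math.tanh((1 - alpha) * upsample + alpha * x)
print(fade_in.shape)                       # (1, 8, 8, 3)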
task_type: infilling_python
code_task: GAN_model
start_line: 548
end_line: 551
before:
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ']
between: ['    x = EqualizedConv2D(filters, ', '                        kernel_size=(3,3), ', '                        strides=(1,1),', '                        kernel_initializer=kernel_init)(x)']
after:
[' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = 
act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Class 'EqualizedConv2D' used at line 548 is defined at line 83 and has a Long-Range dependency. Variable 'filters' used at line 548 is defined at line 542 and has a Short-Range dependency. Variable 'kernel_init' used at line 551 is defined at line 541 and has a Short-Range dependency. Variable 'x' used at line 551 is defined at line 546 and has a Short-Range dependency.
{}
{'Class Long-Range': 1, 'Variable Short-Range': 3}
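Note on the Short/Medium/Long-Range labels above: each dependency sentence pairs a use line with a definition line, and the label reflects the gap between them. Below is a minimal sketch of that bucketing, assuming cutoffs of <=10, 11-30, and >30 lines inferred from the examples in this dump; the actual thresholds are not stated anywhere in these records.

# Hypothetical bucketing of a use/def line gap into the range labels
# used by these records. The cutoffs are assumptions, not documented values.
def horizon_label(use_line: int, def_line: int) -> str:
    distance = use_line - def_line
    if distance <= 10:
        return 'Short-Range'   # e.g. 'filters': 548 - 542 = 6
    if distance <= 30:
        return 'Medium-Range'  # e.g. 'downsample_func': 599 - 583 = 16
    return 'Long-Range'        # e.g. 'EqualizedConv2D': 548 - 83 = 465

print(horizon_label(548, 542))  # Short-Range
print(horizon_label(548, 83))   # Long-Range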
infilling_python
GAN_model
591
594
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ']
[' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))']
[' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'act_func' used at line 591 is defined at line 582 and has a Short-Range dependency. Class 'EqualizedConv2D' used at line 591 is defined at line 83 and has a Long-Range dependency. Variable 'filters1' used at line 591 is defined at line 584 and has a Short-Range dependency. Variable 'kernel_init' used at line 594 is defined at line 587 and has a Short-Range dependency. Variable 'inputs' used at line 594 is defined at line 589 and has a Short-Range dependency.
{}
{'Variable Short-Range': 4, 'Class Long-Range': 1}
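The dict that closes each record is a tally of that record's dependency sentences. A small sketch, assuming the sentence pattern seen above ("<Kind> '<name>' used at line N ... has a <Range> dependency."), that rebuilds the tally with a regex and collections.Counter:

import re
from collections import Counter

# Two sentences in the format used by the record above (assumed pattern).
analysis = (
    "Variable 'act_func' used at line 591 is defined at line 582 and has a "
    "Short-Range dependency. Class 'EqualizedConv2D' used at line 591 is "
    "defined at line 83 and has a Long-Range dependency."
)

# Extract (kind, range) pairs and count each combination.
pairs = re.findall(
    r"(Variable|Class|Function) '[^']+' used at line \d+.*?has a (\w+-Range) dependency",
    analysis,
)
print(dict(Counter(f"{kind} {rng}" for kind, rng in pairs)))
# -> {'Variable Short-Range': 1, 'Class Long-Range': 1}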
infilling_python
GAN_model
599
599
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))']
[' x = downsample_func(x)']
[' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'downsample_func' used at line 599 is defined at line 583 and has a Medium-Range dependency. Variable 'x' used at line 599 is defined at line 595 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Variable Short-Range': 1}
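The layers captured in these records all use the equalized learning-rate trick: weights are drawn from a unit normal and rescaled at call time by sqrt(gain / fan_in), with fan_in = kernel_h * kernel_w * in_channels. A worked example for the 3x3, 512-channel convolutions that dominate these records:

import math

# Runtime weight scale applied by the captured EqualizedConv2D layer.
gain = 2
kernel_h = kernel_w = 3
in_channels = 512
fan_in = kernel_h * kernel_w * in_channels  # 4608
scale = math.sqrt(gain / fan_in)
print(fan_in, round(scale, 5))  # 4608 0.02083

Because the scale is applied inside call() rather than baked into the initializer, every layer keeps the same effective per-weight learning rate regardless of its fan-in, which is the point of the equalized scheme.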
infilling_python
GAN_model
601
601
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ']
[' model = tf.keras.models.Model(inputs, x)']
[' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 601 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 601 is defined at line 589 and has a Medium-Range dependency. Variable 'x' used at line 601 is defined at line 599 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}
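The tally on the line above is simply a count of the "<entity> <range>" categories named in the preceding dependency sentences. A minimal sketch of that counting step in Python, assuming the sentence wording shown in these records (the regex and the function name are illustrative, not part of the dataset's own tooling; the records visible here only ever name Library and Variable entities, so only those are matched):

import re
from collections import Counter

def tally_dependency_categories(text: str) -> dict:
    # Matches sentences of the form seen in these records, e.g.
    # "Variable 'x' used at line 601 is defined at line 599 and has a Short-Range dependency."
    pattern = re.compile(
        r"(Library|Variable) '[^']+' used at line \d+ .*?has a "
        r"(Short|Medium|Long)-Range dependency"
    )
    return dict(Counter(f"{kind} {rng}-Range" for kind, rng in pattern.findall(text)))

sentences = (
    "Library 'tf' used at line 601 is imported at line 15 and has a Long-Range dependency. "
    "Variable 'inputs' used at line 601 is defined at line 589 and has a Medium-Range dependency. "
    "Variable 'x' used at line 601 is defined at line 599 and has a Short-Range dependency."
)
print(tally_dependency_categories(sentences))
# -> {'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}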
infilling_python
GAN_model
599
601
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))']
[' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)']
[' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'downsample_func' used at line 599 is defined at line 583 and has a Medium-Range dependency. Variable 'x' used at line 599 is defined at line 595 and has a Short-Range dependency. Library 'tf' used at line 601 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 601 is defined at line 589 and has a Medium-Range dependency. Variable 'x' used at line 601 is defined at line 599 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 2, 'Variable Short-Range': 2, 'Library Long-Range': 1}
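The Short/Medium/Long-Range labels track the line distance between a use and its definition or import: in the records above, distances of 2 and 4 are labelled Short-Range, 12 and 16 Medium-Range, and 586 Long-Range. A sketch of that bucketing, with cutoffs of 10 and 50 that are inferred from these examples and are an assumption rather than documented values:

def horizon_bucket(use_line: int, def_line: int) -> str:
    # Cutoff values are assumptions inferred from the labels in this file.
    distance = use_line - def_line
    if distance <= 10:
        return "Short-Range"
    if distance <= 50:
        return "Medium-Range"
    return "Long-Range"

# Reproduces the labels in the record above:
print(horizon_bucket(599, 595))  # Short-Range  (Variable 'x', distance 4)
print(horizon_bucket(599, 583))  # Medium-Range (Variable 'downsample_func', distance 16)
print(horizon_bucket(601, 15))   # Long-Range   (Library 'tf', distance 586)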
infilling_python
GAN_model
620
620
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ']
[' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))']
[' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 620 is imported at line 11 and has a Long-Range dependency. Variable 'input_shape' used at line 620 is defined at line 618 and has a Short-Range dependency. Variable 'filter2' used at line 620 is defined at line 618 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2}
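Reassembling a record is mechanical: the three code lists hold the lines before the hidden span, the hidden span itself, and the lines after it, and the two integers (620 and 620 in the record above) give the span's position. The line numbers appear to be 0-based: "imported at line 15" in the dependency sentences points at index 15 of the code list, which is 'import tensorflow as tf'. A sketch of the reassembly with a consistency check, assuming that numbering convention also applies to the two span integers:

def reassemble(before: list, between: list, after: list,
               start_line: int, end_line: int) -> str:
    lines = before + between + after
    # With 0-based numbering, the hidden span should sit at
    # lines[start_line : end_line + 1], i.e. len(before) == start_line.
    assert lines[start_line:end_line + 1] == between, "span position mismatch"
    return "\n".join(lines)

# For the record above, between is the single layers.Input(...) line at
# line 620, so its 'before' list must hold exactly 620 lines.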
infilling_python
GAN_model
622
622
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ']
[' x = top(inputs)']
[' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'top' used at line 622 is defined at line 618 and has a Short-Range dependency. Variable 'inputs' used at line 622 is defined at line 620 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
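The record above ends here; before the next one begins, a note on the layout: each record pairs a masked span of the GAN_model source with its surrounding context — the long list is the preceding code, the short list (here [' x = top(inputs)']) is the span to be infilled, and the list after it is the trailing code. The sketch below shows one plausible way such a record could be sliced from a source file; the helper name make_infilling_record and the exact dict shape are hypothetical, not the dataset's actual tooling.

# Minimal sketch (assumptions flagged above): slice a 1-indexed, inclusive
# [start_line, end_line] span out of a source file to build one record.
def make_infilling_record(source_lines, start_line, end_line,
                          task_type='infilling_python', code_task='GAN_model'):
    return {
        'task_type': task_type,
        'code_task': code_task,
        'start_line': start_line,
        'end_line': end_line,
        'before': source_lines[:start_line - 1],           # context before the span
        'between': source_lines[start_line - 1:end_line],  # the span to infill
        'after': source_lines[end_line:],                  # context after the span
    }

# e.g. the next record (start_line=620, end_line=625) would correspond to
# make_infilling_record(gan_model_lines, 620, 625), where gan_model_lines is
# the hypothetical list of source lines.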
infilling_python
GAN_model
620
625
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ']
[' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)']
[' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 620 is imported at line 11 and has a Long-Range dependency. Variable 'input_shape' used at line 620 is defined at line 618 and has a Short-Range dependency. Variable 'filter2' used at line 620 is defined at line 618 and has a Short-Range dependency. Variable 'top' used at line 622 is defined at line 618 and has a Short-Range dependency. Variable 'inputs' used at line 622 is defined at line 620 and has a Short-Range dependency. Variable 'bottom' used at line 623 is defined at line 618 and has a Short-Range dependency. Variable 'x' used at line 623 is defined at line 622 and has a Short-Range dependency. Library 'tf' used at line 625 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 625 is defined at line 620 and has a Short-Range dependency. Variable 'x' used at line 625 is defined at line 623 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 8}
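The dependency sentences in these records label each use/definition pair as Short-, Medium-, or Long-Range by the line distance between use and definition. The records here only pin the boundaries down loosely: gaps of 2–5 lines are labelled Short-Range, an 11-line gap (in the following record) Medium-Range, and gaps of several hundred lines Long-Range. The cutoffs of 10 and 50 lines in the sketch below are therefore assumptions chosen to be consistent with these examples, not documented values.

# Hypothetical reconstruction of the range labels; short_max and medium_max
# are assumed cutoffs (see note above), not values stated in this file.
def classify_range(use_line, def_line, short_max=10, medium_max=50):
    distance = use_line - def_line
    if distance <= short_max:
        return 'Short-Range'
    if distance <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

assert classify_range(622, 618) == 'Short-Range'   # Variable 'top' in the record above
assert classify_range(620, 11) == 'Long-Range'     # Library 'layers' in the record above
assert classify_range(654, 643) == 'Medium-Range'  # Variable 'act_func' in the next record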
infilling_python
GAN_model
654
657
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' 
conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ']
[' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))']
[' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'act_func' used at line 654 is defined at line 643 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 654 is defined at line 83 and has a Long-Range dependency. Variable 'filters1' used at line 654 is defined at line 646 and has a Short-Range dependency. Variable 'kernel_init' used at line 657 is defined at line 648 and has a Short-Range dependency. Variable 'inputs' used at line 657 is defined at line 651 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Class Long-Range': 1, 'Variable Short-Range': 3}
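The Short/Medium/Long-Range labels above are consistent with a simple bucketing of the line distance between a symbol's use and its definition. Below is a minimal sketch of that bucketing, assuming cutoffs of 10 and 50 lines; the cutoffs are inferred from this record's examples (a distance of 8 lines is Short, 11 is Medium, 571 is Long) and are not stated anywhere in the dump.

# Hedged sketch: reproduce this record's range labels from line distances.
# The cutoffs (<=10 Short, <=50 Medium) are assumptions inferred from the
# examples above, not documented thresholds.
def dependency_range(usage_line, def_line):
    distance = usage_line - def_line
    if distance <= 10:
        return 'Short-Range'
    if distance <= 50:
        return 'Medium-Range'
    return 'Long-Range'

# (kind, usage_line, def_line) transcribed from this record's analysis
pairs = [
    ('Variable', 654, 643),   # act_func        -> Medium-Range (11)
    ('Class',    654,  83),   # EqualizedConv2D -> Long-Range  (571)
    ('Variable', 654, 646),   # filters1        -> Short-Range   (8)
    ('Variable', 657, 648),   # kernel_init     -> Short-Range   (9)
    ('Variable', 657, 651),   # inputs          -> Short-Range   (6)
]
for kind, use_ln, def_ln in pairs:
    print(kind, dependency_range(use_ln, def_ln))
# Tallies to {'Variable Medium-Range': 1, 'Class Long-Range': 1, 'Variable Short-Range': 3},
# matching the frequency dict above.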
infilling_python
GAN_model
660
660
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' 
conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:']
[' h = downsample_func(inputs)']
[' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 660}]
Variable 'downsample_func' used at line 660 is defined at line 644 and has a Medium-Range dependency. Variable 'inputs' used at line 660 is defined at line 651 and has a Short-Range dependency.
{'If Body': 1}
{'Variable Medium-Range': 1, 'Variable Short-Range': 1}
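This record's reason tag ('If Body' at usage line 660) agrees with the ground-truth infill: the completed line `h = downsample_func(inputs)` sits directly under `if not bottom == None:` in `from_rgb_fadein`. The sketch below shows one way such a tag could be derived from indentation. It is an illustration of what the tag means, not the dataset's actual tagger, and the numbers 658-659 for the two context lines are approximate; only line 660 is pinned by the record's start_line/end_line fields.

# Hedged sketch: tag a line 'If Body' when the nearest enclosing block
# header above it (the first line with smaller indent) is an if/elif.
lines = {
    658: '    x = top(x)',                        # from the `before` field (approx. number)
    659: '    if not bottom == None:',            # from the `before` field (approx. number)
    660: '        h = downsample_func(inputs)',   # the `between` (ground-truth) line
}

def is_if_body(src, target):
    indent = len(src[target]) - len(src[target].lstrip())
    for n in sorted((k for k in src if k < target), reverse=True):
        line = src[n]
        if len(line) - len(line.lstrip()) < indent:
            return line.lstrip().startswith(('if ', 'elif '))
    return False

print(is_if_body(lines, 660))   # True -> consistent with the 'If Body' tag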
infilling_python
GAN_model
662
665
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' 
conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ']
[' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))']
['', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 662}, {'reason_category': 'If Body', 'usage_line': 663}, {'reason_category': 'If Body', 'usage_line': 664}, {'reason_category': 'If Body', 'usage_line': 665}]
Variable 'act_func' used at line 662 is defined at line 643 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 662 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 662 is defined at line 646 and has a Medium-Range dependency. Variable 'kernel_init' used at line 665 is defined at line 648 and has a Medium-Range dependency. Variable 'h' used at line 665 is defined at line 660 and has a Short-Range dependency.
{'If Body': 4}
{'Variable Medium-Range': 3, 'Class Long-Range': 1, 'Variable Short-Range': 1}
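The frequency dicts in these records are plain tallies of the per-dependency sentences, keyed as '<kind> <range>'. Below is a minimal sketch of that aggregation for this record; the pairs are transcribed from the analysis above, and the key format is read off the printed dicts.

from collections import Counter

# (symbol kind, dependency range) pairs for the infill spanning lines 662-665
deps = [
    ('Variable', 'Medium-Range'),   # act_func        (662 <- 643)
    ('Class',    'Long-Range'),     # EqualizedConv2D (662 <- 83)
    ('Variable', 'Medium-Range'),   # filters2        (662 <- 646)
    ('Variable', 'Medium-Range'),   # kernel_init     (665 <- 648)
    ('Variable', 'Short-Range'),    # h               (665 <- 660)
]

freq = Counter(f'{kind} {rng}' for kind, rng in deps)
print(dict(freq))
# {'Variable Medium-Range': 3, 'Class Long-Range': 1, 'Variable Short-Range': 1}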
infilling_python
GAN_model
667
669
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' 
conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '']
[' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)']
[' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 667}, {'reason_category': 'If Body', 'usage_line': 668}, {'reason_category': 'If Body', 'usage_line': 669}]
Variable 'alpha' used at line 667 is defined at line 652 and has a Medium-Range dependency. Variable 'h' used at line 667 is defined at line 662 and has a Short-Range dependency. Variable 'x' used at line 667 is defined at line 658 and has a Short-Range dependency. Variable 'bottom' used at line 669 is defined at line 642 and has a Medium-Range dependency. Variable 'fade_in' used at line 669 is defined at line 667 and has a Short-Range dependency.
{'If Body': 3}
{'Variable Medium-Range': 2, 'Variable Short-Range': 3}
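The infilled span in the record above implements the discriminator-side fade-in: blend the downsampled from-RGB path h with the new path x by alpha, then feed the blend to bottom. Below is a minimal sketch of that blending rule on stand-in tensors; fade_in_blend and the shapes are illustrative names, not part of the dataset:

    import tensorflow as tf

    def fade_in_blend(skip_path, x_path, alpha):
        # Progressive-growing fade-in: alpha=0 keeps only the old (skip) path,
        # alpha=1 keeps only the newly grown path.
        return (1.0 - alpha) * skip_path + alpha * x_path

    h = tf.random.normal((1, 4, 4, 512))  # stand-in for the downsampled from-RGB path
    x = tf.random.normal((1, 4, 4, 512))  # stand-in for the new high-resolution path
    print(fade_in_blend(h, x, alpha=0.3).shape)  # (1, 4, 4, 512)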
infilling_python
GAN_model
667
672
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' 
conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '']
[' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ']
['', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 667}, {'reason_category': 'If Body', 'usage_line': 668}, {'reason_category': 'If Body', 'usage_line': 669}]
Variable 'alpha' used at line 667 is defined at line 652 and has a Medium-Range dependency. Variable 'h' used at line 667 is defined at line 662 and has a Short-Range dependency. Variable 'x' used at line 667 is defined at line 658 and has a Short-Range dependency. Variable 'bottom' used at line 669 is defined at line 642 and has a Medium-Range dependency. Variable 'fade_in' used at line 669 is defined at line 667 and has a Short-Range dependency. Library 'tf' used at line 671 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 671 is defined at line 651 and has a Medium-Range dependency. Variable 'alpha' used at line 671 is defined at line 652 and has a Medium-Range dependency. Variable 'x' used at line 671 is defined at line 669 and has a Short-Range dependency. Variable 'model' used at line 672 is defined at line 671 and has a Short-Range dependency.
{'If Body': 3}
{'Variable Medium-Range': 4, 'Variable Short-Range': 5, 'Library Long-Range': 1}
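The horizon annotations above label each definition-to-use gap as Short-, Medium-, or Long-Range. The dataset itself does not state the cutoffs; the sketch below infers plausible thresholds from the examples in this record (a gap of at most 10 lines reads as Short-Range, at most 30 as Medium-Range, anything larger as Long-Range) and should be treated as an assumption, not documented behavior:

    def horizon_label(def_line: int, use_line: int) -> str:
        # Assumed cutoffs, inferred from this record's annotations only.
        gap = use_line - def_line
        if gap <= 10:
            return "Short-Range"
        if gap <= 30:
            return "Medium-Range"
        return "Long-Range"

    print(horizon_label(652, 667))  # Medium-Range, matches the 'alpha' annotation
    print(horizon_label(15, 671))   # Long-Range, matches the 'tf' (library) annotation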
infilling_python
GAN_model
671
671
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' 
conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ']
[' model = tf.keras.models.Model([inputs, alpha], x)']
[' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 671 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 671 is defined at line 651 and has a Medium-Range dependency. Variable 'alpha' used at line 671 is defined at line 652 and has a Medium-Range dependency. Variable 'x' used at line 671 is defined at line 669 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 2, 'Variable Short-Range': 1}
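The record above reduces to a single target line: constructing a functional Keras model with two inputs, the image tensor and the fade-in alpha. Here is a self-contained sketch of that two-input pattern; the placeholder layers in the body are illustrative, and only the Model([inputs, alpha], x) constructor mirrors the record:

    import tensorflow as tf
    from tensorflow.keras import layers

    inputs = layers.Input(shape=(4, 4, 3))   # image input
    alpha = layers.Input(shape=(1,))         # fade-in coefficient input
    x = layers.Flatten()(inputs)
    x = layers.Concatenate()([x, alpha])     # placeholder body, not from the record
    x = layers.Dense(1)(x)
    model = tf.keras.models.Model([inputs, alpha], x)  # the infilled line's pattern
    print(model([tf.random.normal((1, 4, 4, 3)), tf.constant([[1.0]])]).shape)  # (1, 1)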
infilling_python
Timeseries_Clustering
23
23
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principal components for each asset', 'pca = PCA(n_components=10, whiten=True)']
['beta=pca.fit_transform(norm_ret_df.T)']
['df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with preprocessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on preprocessed data (X) set clusters to 5, n_init = 10, and random_state=42 so the algorithm is initialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances between cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based on the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DBSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'pca' used at line 23 is defined at line 22 and has a Short-Range dependency. Variable 'norm_ret_df' used at line 23 is defined at line 20 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
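The `between` span in the record above is the PCA projection step. Below is a minimal, self-contained sketch of the same call; the returns matrix is synthetic stand-in data (the record itself loads norm_ret_df.csv, which is not part of this dump), and the transpose puts assets on the rows so each asset receives a 10-dimensional loading vector.

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA

# Synthetic stand-in for norm_ret_df: 250 daily normalised log returns
# for 40 assets (the record reads these from norm_ret_df.csv instead).
rng = np.random.default_rng(0)
norm_ret_df = pd.DataFrame(rng.standard_normal((250, 40)),
                           columns=[f"asset_{i}" for i in range(40)])

# whiten=True rescales each principal component to unit variance, so later
# distance-based clustering is not dominated by the first component.
pca = PCA(n_components=10, whiten=True)
beta = pca.fit_transform(norm_ret_df.T)  # transpose: one row (sample) per asset
print(beta.shape)                        # (40, 10) -> 10 loadings per asset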
infilling_python
Timeseries_Clustering
23
25
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)']
['beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values']
['print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'pca' used at line 23 is defined at line 22 and has a Short-Range dependency. Variable 'norm_ret_df' used at line 23 is defined at line 20 and has a Short-Range dependency. Library 'pd' used at line 24 is imported at line 1 and has a Medium-Range dependency. Variable 'beta' used at line 24 is defined at line 23 and has a Short-Range dependency. Variable 'df_beta' used at line 25 is defined at line 24 and has a Short-Range dependency.
{}
{'Variable Short-Range': 4, 'Library Medium-Range': 1}
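Later in each record, candidate pairs are ranked by the Euclidean norm of the difference between cumulative normalised returns. A small illustration of that metric on a hypothetical four-asset cluster (the data here is invented for the sketch):

import itertools
import numpy as np
import pandas as pd
from numpy import linalg as LA

rng = np.random.default_rng(1)
# Hypothetical cluster of four assets, 100 normalised log returns each.
cluster_df = pd.DataFrame(rng.standard_normal((100, 4)), columns=list("ABCD"))
cumulative = cluster_df.cumsum()

# Distance between every unordered pair of cumulative return paths.
for a, b in itertools.combinations(cluster_df.columns, 2):
    dist = LA.norm(cumulative[a] - cumulative[b])
    print(a, b, round(dist, 3))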
infilling_python
Timeseries_Clustering
29
29
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ']
['X = preprocessing.StandardScaler().fit_transform(stock_pca)']
['', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Library 'preprocessing' used at line 29 is imported at line 8 and has a Medium-Range dependency. Variable 'stock_pca' used at line 29 is defined at line 25 and has a Short-Range dependency.
{}
{'Library Medium-Range': 1, 'Variable Short-Range': 1}
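The one-line `between` span here standardises the PCA loadings before clustering. The sketch below runs the same call on an invented loading matrix and checks the column-wise zero-mean / unit-variance output it produces:

import numpy as np
from sklearn import preprocessing

rng = np.random.default_rng(2)
stock_pca = rng.normal(loc=3.0, scale=5.0, size=(40, 10))  # invented loadings

# fit_transform learns each column's mean and std, then rescales in one step.
X = preprocessing.StandardScaler().fit_transform(stock_pca)
print(X.mean(axis=0).round(6))  # ~0 in every column
print(X.std(axis=0).round(6))   # ~1 in every column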
infilling_python
Timeseries_Clustering
33
33
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3']
['dbscan = DBSCAN(eps=2, min_samples=3)']
['dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Library 'DBSCAN' used at line 33 is imported at line 6 and has a Medium-Range dependency.
{}
{'Library Medium-Range': 1}
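This record's `between` span is just the DBSCAN constructor. A compact end-to-end sketch, using synthetic blobs in place of the standardised loadings, also shows the noise label -1 that the surrounding code must discount when counting clusters:

import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(3)
# Two tight synthetic blobs plus one far-away outlier DBSCAN should mark as noise.
X = np.vstack([rng.normal(0, 0.3, (20, 2)),
               rng.normal(5, 0.3, (20, 2)),
               [[50.0, 50.0]]])

dbscan = DBSCAN(eps=2, min_samples=3)
labels = dbscan.fit_predict(X)
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print(n_clusters_, int((labels == -1).sum()))  # 2 clusters, 1 noise point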
infilling_python
Timeseries_Clustering
33
36
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3']
['dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)']
['print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Library 'DBSCAN' used at line 33 is imported at line 6 and has a Medium-Range dependency. Variable 'dbscan' used at line 34 is defined at line 33 and has a Short-Range dependency. Variable 'X' used at line 34 is defined at line 29 and has a Short-Range dependency. Variable 'dbscan' used at line 35 is defined at line 33 and has a Short-Range dependency. Variable 'labels' used at line 36 is defined at line 35 and has a Short-Range dependency.
{}
{'Library Medium-Range': 1, 'Variable Short-Range': 4}
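The correlation strategy further down each record scores every pair with scipy's pearsonr and sorts on the coefficient. A minimal version on invented series (sorted descending here so the strongest pair surfaces first, whereas the records sort ascending):

import itertools
import numpy as np
import pandas as pd
from scipy.stats import pearsonr

rng = np.random.default_rng(4)
base = rng.standard_normal(100)
df = pd.DataFrame({
    "A": base,
    "B": base + 0.1 * rng.standard_normal(100),  # tightly tied to A
    "C": rng.standard_normal(100),               # independent noise
})

corrs = [(pair, pearsonr(df[pair[0]], df[pair[1]])[0])
         for pair in itertools.combinations(df.columns, 2)]
corrs.sort(key=lambda x: x[1], reverse=True)  # most correlated first
print(corrs[0][0])                            # ('A', 'B')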
infilling_python
Timeseries_Clustering
40
40
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df']
["df_beta['labels']=labels"]
['df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'df_beta' used at line 40 is defined at line 24 and has a Medium-Range dependency. Variable 'labels' used at line 40 is defined at line 35 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Variable Short-Range': 1}
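The `between` span attaches the DBSCAN labels to the loading frame. The sketch below performs the record's label bookkeeping end to end on toy data: attach the labels, cast them to a categorical column, and re-index by asset name (the five names are invented):

import numpy as np
import pandas as pd

# Invented loading frame for five assets plus the labels DBSCAN assigned them.
df_beta = pd.DataFrame(np.arange(10).reshape(5, 2), columns=[0, 1])
labels = np.array([0, 0, 1, 1, -1])

df_beta["labels"] = labels
df_beta["labels"] = df_beta["labels"].astype("category")  # fixed label set
df_beta = df_beta.set_index(pd.Index(["AAA", "BBB", "CCC", "DDD", "EEE"]))
print(df_beta.sort_values(by=["labels"]))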
infilling_python
Timeseries_Clustering
40
43
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df']
["df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])"]
['print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'df_beta' used at line 40 is defined at line 24 and has a Medium-Range dependency. Variable 'labels' used at line 40 is defined at line 35 and has a Short-Range dependency. Variable 'df_beta' used at line 41 is defined at line 24 and has a Medium-Range dependency. Variable 'norm_ret_df' used at line 42 is defined at line 20 and has a Medium-Range dependency. Variable 'df_beta' used at line 43 is defined at line 42 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 3, 'Variable Short-Range': 2}
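The next two records fill in the loop that splits df_beta into one DataFrame per cluster label. Below is a sketch of that split on toy labels, followed by the groupby one-liner that yields the same partition:

import numpy as np
import pandas as pd

df_beta = pd.DataFrame({"pc1": np.arange(6.0),
                        "labels": [0, 1, 0, 2, 1, 0]})
n_clusters_ = 3

# Dict-of-DataFrames split, as written in the records.
d = {}
for k in np.arange(0, n_clusters_, 1):
    d[k] = df_beta[df_beta["labels"] == k]

# Equivalent partition via groupby.
d2 = {k: grp for k, grp in df_beta.groupby("labels")}
print(d[0].equals(d2[0]))  # True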
infilling_python
Timeseries_Clustering
47
52
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes']
['k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]"]
['', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 50}, {'reason_category': 'Loop Body', 'usage_line': 51}, {'reason_category': 'Loop Body', 'usage_line': 52}]
Library 'np' used at line 47 is imported at line 2 and has a Long-Range dependency. Variable 'n_clusters_' used at line 47 is defined at line 36 and has a Medium-Range dependency. Variable 'k_list' used at line 48 is defined at line 47 and has a Short-Range dependency. Variable 'k_list' used at line 50 is defined at line 47 and has a Short-Range dependency. Variable 'd' used at line 51 is defined at line 49 and has a Short-Range dependency. Variable 'k' used at line 51 is part of a Loop defined at line 50 and has a Short-Range dependency. Library 'pd' used at line 51 is imported at line 1 and has a Long-Range dependency. Variable 'd' used at line 52 is defined at line 49 and has a Short-Range dependency. Variable 'k' used at line 52 is part of a Loop defined at line 50 and has a Short-Range dependency. Variable 'df_beta' used at line 52 is defined at line 42 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 2}
{'Library Long-Range': 2, 'Variable Medium-Range': 1, 'Variable Short-Range': 5, 'Variable Loop Short-Range': 2}
infilling_python
Timeseries_Clustering
49
52
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list']
['d = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]"]
['', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 50}, {'reason_category': 'Loop Body', 'usage_line': 51}, {'reason_category': 'Loop Body', 'usage_line': 52}]
Variable 'k_list' used at line 50 is defined at line 47 and has a Short-Range dependency. Variable 'd' used at line 51 is defined at line 49 and has a Short-Range dependency. Variable 'k' used at line 51 is part of a Loop defined at line 50 and has a Short-Range dependency. Library 'pd' used at line 51 is imported at line 1 and has a Long-Range dependency. Variable 'd' used at line 52 is defined at line 49 and has a Short-Range dependency. Variable 'k' used at line 52 is part of a Loop defined at line 50 and has a Short-Range dependency. Variable 'df_beta' used at line 52 is defined at line 42 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 2}
{'Variable Short-Range': 4, 'Variable Loop Short-Range': 2, 'Library Long-Range': 1}
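To make the dependency notes above concrete, here is a minimal runnable sketch of the span this record infills (source lines 47-52, per the record's own line numbering); the toy df_beta and n_clusters_ values are illustrative stand-ins, not part of the record.

    import numpy as np
    import pandas as pd

    # stand-ins for the record's df_beta (defined at line 42) and n_clusters_ (line 36)
    df_beta = pd.DataFrame({'labels': [0, 0, 1, 2, 1]})
    n_clusters_ = 3

    k_list = np.arange(0, n_clusters_, 1)       # line 47: defines k_list
    d = {}                                      # line 49: defines d
    for k in k_list:                            # line 50: loop header, the 'Define Stop Criteria' tag
        d[k] = pd.DataFrame()                   # line 51: loop body; pd is imported at line 1 (Long-Range)
        d[k] = df_beta[df_beta['labels'] == k]  # line 52: loop body; one sub-frame per cluster label
    print(d[0])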
infilling_python
Timeseries_Clustering
63
63
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)']
['kmeans.fit(X)']
['label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', '    asset1_list.append(pairs[i][0])', '    asset2_list.append(pairs[i][1])', '', '    dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', '    euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', '    sdd1.append(sdd_list[i][0][0])', '    sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', '    s1=sdd1[i]', '    s2=sdd2[i]', '', '    if (s1 not in selected_stocks) and (s2 not in selected_stocks):', '        selected_stocks.append(s1)', '        selected_stocks.append(s2)', '        pair=(s1,s2)', '        selected_pairs_messd.append(pair)', '', '    if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', '        break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', '    corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', '    pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', '    sdd1.append(sort_corr_list[i][0][0])', '    sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', '    s1=sdd1[i]', '    s2=sdd2[i]', '', '    if (s1 not in selected_stocks) and (s2 not in selected_stocks):', '        selected_stocks.append(s1)', '        selected_stocks.append(s2)', '        pair=(s1,s2)', '        selected_pairs_corr.append(pair)', '', '    if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', '        break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', '    score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', '    confidence_level = 0.05', '    if pvalue < confidence_level:', '        coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'kmeans' used at line 63 is defined at line 62 and has a Short-Range dependency. Variable 'X' used at line 63 is defined at line 29 and has a Long-Range dependency.
{}
{'Variable Short-Range': 1, 'Variable Long-Range': 1}
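The infilled line and its two dependencies in one runnable sketch; the random stock_pca stand-in (for the PCA output at line 25) is an assumption for illustration.

    import numpy as np
    from sklearn import preprocessing
    from sklearn.cluster import KMeans

    stock_pca = np.random.RandomState(0).rand(30, 10)            # stand-in for line 25
    X = preprocessing.StandardScaler().fit_transform(stock_pca)  # line 29: defines X (the Long-Range dependency)
    kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)    # line 62: defines kmeans (the Short-Range dependency)
    kmeans.fit(X)                                                # line 63: the infilled span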
infilling_python
Timeseries_Clustering
62
64
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time']
['kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ']
['center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', '    asset1_list.append(pairs[i][0])', '    asset2_list.append(pairs[i][1])', '', '    dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', '    euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', '    sdd1.append(sdd_list[i][0][0])', '    sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', '    s1=sdd1[i]', '    s2=sdd2[i]', '', '    if (s1 not in selected_stocks) and (s2 not in selected_stocks):', '        selected_stocks.append(s1)', '        selected_stocks.append(s2)', '        pair=(s1,s2)', '        selected_pairs_messd.append(pair)', '', '    if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', '        break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', '    corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', '    pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', '    sdd1.append(sort_corr_list[i][0][0])', '    sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', '    s1=sdd1[i]', '    s2=sdd2[i]', '', '    if (s1 not in selected_stocks) and (s2 not in selected_stocks):', '        selected_stocks.append(s1)', '        selected_stocks.append(s2)', '        pair=(s1,s2)', '        selected_pairs_corr.append(pair)', '', '    if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', '        break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', '    score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', '    confidence_level = 0.05', '    if pvalue < confidence_level:', '        coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Library 'KMeans' used at line 62 is imported at line 9 and has a Long-Range dependency. Variable 'kmeans' used at line 63 is defined at line 62 and has a Short-Range dependency. Variable 'X' used at line 63 is defined at line 29 and has a Long-Range dependency. Variable 'kmeans' used at line 64 is defined at line 62 and has a Short-Range dependency. Variable 'X' used at line 64 is defined at line 29 and has a Long-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2, 'Variable Long-Range': 2}
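The same three source lines annotated with the dependencies listed above; X is assumed to be the standardised array from line 29. Since the model is fit and then asked to label its own training data, the two calls could also be fused:

    from sklearn.cluster import KMeans                         # line 9: the import used Long-Range at line 62

    kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)  # line 62
    kmeans.fit(X)                                              # line 63: X from line 29 (Long-Range)
    label_kmeans = kmeans.predict(X)                           # line 64
    # equivalent single call on the training data itself:
    # label_kmeans = KMeans(n_clusters=5, n_init=10, random_state=42).fit_predict(X)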
infilling_python
Timeseries_Clustering
73
73
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')"]
['labels_hc = hc.fit_predict(X)']
['print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', '    asset1_list.append(pairs[i][0])', '    asset2_list.append(pairs[i][1])', '', '    dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', '    euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', '    sdd1.append(sdd_list[i][0][0])', '    sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', '    s1=sdd1[i]', '    s2=sdd2[i]', '', '    if (s1 not in selected_stocks) and (s2 not in selected_stocks):', '        selected_stocks.append(s1)', '        selected_stocks.append(s2)', '        pair=(s1,s2)', '        selected_pairs_messd.append(pair)', '', '    if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', '        break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', '    corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', '    pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', '    sdd1.append(sort_corr_list[i][0][0])', '    sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', '    s1=sdd1[i]', '    s2=sdd2[i]', '', '    if (s1 not in selected_stocks) and (s2 not in selected_stocks):', '        selected_stocks.append(s1)', '        selected_stocks.append(s2)', '        pair=(s1,s2)', '        selected_pairs_corr.append(pair)', '', '    if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', '        break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', '    score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', '    confidence_level = 0.05', '    if pvalue < confidence_level:', '        coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'hc' used at line 73 is defined at line 72 and has a Short-Range dependency. Variable 'X' used at line 73 is defined at line 29 and has a Long-Range dependency.
{}
{'Variable Short-Range': 1, 'Variable Long-Range': 1}
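A runnable sketch of lines 72-73 with a synthetic stand-in for X. One caveat worth flagging: newer scikit-learn releases renamed the affinity keyword of AgglomerativeClustering to metric (affinity was deprecated and later removed), so the spelling below simply follows the record.

    import numpy as np
    from sklearn.cluster import AgglomerativeClustering

    X = np.random.RandomState(0).rand(30, 10)   # stand-in for the preprocessed array (line 29)
    clusters = 5                                # line 71
    hc = AgglomerativeClustering(n_clusters=clusters, affinity='euclidean', linkage='ward')  # line 72
    labels_hc = hc.fit_predict(X)               # line 73: Short-Range on hc, Long-Range on X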
infilling_python
Timeseries_Clustering
79
80
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)']
['cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]']
['print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', '    asset1_list.append(pairs[i][0])', '    asset2_list.append(pairs[i][1])', '', '    dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', '    euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', '    sdd1.append(sdd_list[i][0][0])', '    sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', '    s1=sdd1[i]', '    s2=sdd2[i]', '', '    if (s1 not in selected_stocks) and (s2 not in selected_stocks):', '        selected_stocks.append(s1)', '        selected_stocks.append(s2)', '        pair=(s1,s2)', '        selected_pairs_messd.append(pair)', '', '    if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', '        break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', '    corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', '    pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', '    sdd1.append(sort_corr_list[i][0][0])', '    sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', '    s1=sdd1[i]', '    s2=sdd2[i]', '', '    if (s1 not in selected_stocks) and (s2 not in selected_stocks):', '        selected_stocks.append(s1)', '        selected_stocks.append(s2)', '        pair=(s1,s2)', '        selected_pairs_corr.append(pair)', '', '    if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', '        break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', '    score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', '    confidence_level = 0.05', '    if pvalue < confidence_level:', '        coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'd' used at line 79 is defined at line 49 and has a Medium-Range dependency. Variable 'norm_ret_df' used at line 80 is defined at line 20 and has a Long-Range dependency. Variable 'cluster1_asset_list' used at line 80 is defined at line 79 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Variable Long-Range': 1, 'Variable Short-Range': 1}
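A toy reconstruction of the two infilled lines; the miniature norm_ret_df and d are assumed stand-ins for the objects defined at lines 20 and 49. d[1].index.values works as an asset list because df_beta was reindexed by norm_ret_df.T.index, so cluster rows are keyed by asset name.

    import numpy as np
    import pandas as pd

    norm_ret_df = pd.DataFrame(np.random.RandomState(0).randn(6, 3), columns=['A', 'B', 'C'])
    d = {1: pd.DataFrame(index=['A', 'C'])}                  # stand-in: cluster 1 holds assets A and C

    cluster1_asset_list = d[1].index.values                  # line 79: d from line 49 (Medium-Range)
    clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]  # line 80: column-select those assets (Long-Range on norm_ret_df)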
infilling_python
Timeseries_Clustering
84
84
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns']
['cumulative_norm_ret=clusters_norm_ret_df.cumsum()']
['', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'clusters_norm_ret_df' used at line 84 is defined at line 80 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
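The single infilled line is a column-wise running sum: because each column holds normalised log returns, cumsum turns every return series into a cumulative-return path. A hedged sketch with toy data:

    import numpy as np
    import pandas as pd

    clusters_norm_ret_df = pd.DataFrame(np.random.RandomState(0).randn(5, 2), columns=['A', 'C'])  # stand-in for line 80
    cumulative_norm_ret = clusters_norm_ret_df.cumsum()     # line 84: running sum down each column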
infilling_python
Timeseries_Clustering
87
87
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns']
['pair_order_list = itertools.combinations(cluster1_asset_list,2)']
['pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Library 'itertools' used at line 87 is imported at line 12 and has a Long-Range dependency. Variable 'cluster1_asset_list' used at line 87 is defined at line 79 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
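itertools.combinations(cluster1_asset_list, 2) yields each unordered pair exactly once, which is why the downstream loops never see both (A,B) and (B,A). A tiny demonstration (the asset names are illustrative):

    import itertools

    cluster1_asset_list = ['A', 'B', 'C']                             # stand-in for line 79
    pair_order_list = itertools.combinations(cluster1_asset_list, 2)  # line 87: the infilled span
    print(list(pair_order_list))                                      # [('A', 'B'), ('A', 'C'), ('B', 'C')]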
infilling_python
Timeseries_Clustering
92
97
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]']
['for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)']
['', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 92}, {'reason_category': 'Loop Body', 'usage_line': 93}, {'reason_category': 'Loop Body', 'usage_line': 94}, {'reason_category': 'Loop Body', 'usage_line': 95}, {'reason_category': 'Loop Body', 'usage_line': 96}, {'reason_category': 'Loop Body', 'usage_line': 97}]
Variable 'pairs' used at line 92 is defined at line 88 and has a Short-Range dependency. Variable 'asset1_list' used at line 93 is defined at line 89 and has a Short-Range dependency. Variable 'pairs' used at line 93 is defined at line 88 and has a Short-Range dependency. Variable 'i' used at line 93 is part of a Loop defined at line 92 and has a Short-Range dependency. Variable 'asset2_list' used at line 94 is defined at line 90 and has a Short-Range dependency. Variable 'pairs' used at line 94 is defined at line 88 and has a Short-Range dependency. Variable 'i' used at line 94 is part of a Loop defined at line 92 and has a Short-Range dependency. Library 'LA' used at line 96 is imported at line 15 and has a Long-Range dependency. Variable 'cumulative_norm_ret' used at line 96 is defined at line 84 and has a Medium-Range dependency. Variable 'asset1_list' used at line 96 is defined at line 89 and has a Short-Range dependency. Variable 'i' used at line 96 is part of a Loop defined at line 92 and has a Short-Range dependency. Variable 'asset2_list' used at line 96 is defined at line 90 and has a Short-Range dependency. Variable 'euclidean_distance_list' used at line 97 is defined at line 91 and has a Short-Range dependency. Variable 'dist' used at line 97 is defined at line 96 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 5}
{'Variable Short-Range': 9, 'Variable Loop Short-Range': 3, 'Library Long-Range': 1, 'Variable Medium-Range': 1}
infilling_python
Timeseries_Clustering
96
97
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '']
[' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)']
['', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 96}, {'reason_category': 'Loop Body', 'usage_line': 97}]
Library 'LA' used at line 96 is imported at line 15 and has a Long-Range dependency. Variable 'cumulative_norm_ret' used at line 96 is defined at line 84 and has a Medium-Range dependency. Variable 'asset1_list' used at line 96 is defined at line 89 and has a Short-Range dependency. Variable 'i' used at line 96 is part of a Loop defined at line 92 and has a Short-Range dependency. Variable 'asset2_list' used at line 96 is defined at line 90 and has a Short-Range dependency. Variable 'euclidean_distance_list' used at line 97 is defined at line 91 and has a Short-Range dependency. Variable 'dist' used at line 97 is defined at line 96 and has a Short-Range dependency.
{'Loop Body': 2}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 4, 'Variable Loop Short-Range': 1}
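The infill target in the record above fills the loop body that measures how far apart two assets' cumulative normalised return series are, using LA.norm on their difference. A minimal self-contained sketch of the same idea on a toy DataFrame; the column names 'A', 'B', 'C' are hypothetical stand-ins for the dataset's tickers:

import itertools
import numpy as np
import pandas as pd

# Toy stand-in for clusters_norm_ret_df.cumsum()
cum_ret = pd.DataFrame(np.random.randn(100, 3).cumsum(axis=0), columns=['A', 'B', 'C'])

distances = []
for a, b in itertools.combinations(cum_ret.columns, 2):
    # L2 norm of the difference between the two cumulative-return series
    distances.append(((a, b), np.linalg.norm(cum_ret[a] - cum_ret[b])))
print(distances)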
infilling_python
Timeseries_Clustering
102
102
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))']
['sdd_list.sort(key = lambda x: x[1])']
['', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Lambda_Expressions', 'usage_line': 102}]
Variable 'sdd_list' used at line 102 is defined at line 101 and has a Short-Range dependency. Variable 'x' used at line 102 is part of a Lambda_Expressions defined at line 102 and has a Short-Range dependency.
{'Lambda_Expressions': 1}
{'Variable Short-Range': 1, 'Variable Lambda_Expressions Short-Range': 1}
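This record's target is a one-line in-place sort of the (pair, distance) tuples, keyed by a lambda on the distance element. A short sketch of the same sort on toy values:

# Each element is ((asset1, asset2), euclidean_distance)
pairs_with_dist = [(('A', 'B'), 2.0), (('A', 'C'), 3.0), (('C', 'D'), 1.5)]
pairs_with_dist.sort(key=lambda x: x[1])  # ascending by distance
print(pairs_with_dist)  # [(('C', 'D'), 1.5), (('A', 'B'), 2.0), (('A', 'C'), 3.0)]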
infilling_python
Timeseries_Clustering
96
96
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '']
[' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])']
[' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 96}]
Library 'LA' used at line 96 is imported at line 15 and has a Long-Range dependency. Variable 'cumulative_norm_ret' used at line 96 is defined at line 84 and has a Medium-Range dependency. Variable 'asset1_list' used at line 96 is defined at line 89 and has a Short-Range dependency. Variable 'i' used at line 96 is part of a Loop defined at line 92 and has a Short-Range dependency. Variable 'asset2_list' used at line 96 is defined at line 90 and has a Short-Range dependency.
{'Loop Body': 1}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 2, 'Variable Loop Short-Range': 1}
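The same LA.norm call is the target here. One step worth spelling out: the surrounding comments speak of the minimum Euclidean squared distance, while the code takes the unsquared norm; since the square root is monotone, both give the same ranking of pairs. A quick check on toy arrays:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([2.0, 0.0, 3.0])
# norm(x - y) equals sqrt(sum((x - y)**2)), so ranking by either is identical
print(np.isclose(np.linalg.norm(x - y), np.sqrt(np.sum((x - y) ** 2))))  # True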
infilling_python
Timeseries_Clustering
125
127
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)']
[' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)']
['', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 125}, {'reason_category': 'If Body', 'usage_line': 125}, {'reason_category': 'Loop Body', 'usage_line': 126}, {'reason_category': 'If Body', 'usage_line': 126}, {'reason_category': 'Loop Body', 'usage_line': 127}, {'reason_category': 'If Body', 'usage_line': 127}]
Variable 'selected_stocks' used at line 125 is defined at line 114 and has a Medium-Range dependency. Variable 's2' used at line 125 is defined at line 121 and has a Short-Range dependency. Variable 's1' used at line 126 is defined at line 120 and has a Short-Range dependency. Variable 's2' used at line 126 is defined at line 121 and has a Short-Range dependency. Variable 'selected_pairs_messd' used at line 127 is defined at line 115 and has a Medium-Range dependency. Variable 'pair' used at line 127 is defined at line 126 and has a Short-Range dependency.
{'Loop Body': 3, 'If Body': 3}
{'Variable Medium-Range': 2, 'Variable Short-Range': 4}
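The target lines append both members of an accepted pair to selected_stocks, which is what enforces "no repeating assets per pairs". The same greedy pass, sketched with a set for the membership test; the toy pair list is assumed already sorted by distance:

sorted_pairs = [('A', 'B'), ('A', 'C'), ('C', 'D')]
used = set()
unique_pairs = []
for s1, s2 in sorted_pairs:
    if s1 not in used and s2 not in used:  # neither asset paired yet
        used.update((s1, s2))
        unique_pairs.append((s1, s2))
print(unique_pairs)  # [('A', 'B'), ('C', 'D')], matching the comment's example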
infilling_python
Timeseries_Clustering
129
129
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '']
[' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):']
[' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 129}, {'reason_category': 'If Condition', 'usage_line': 129}]
Variable 'selected_pairs_messd' used at line 129 is defined at line 115 and has a Medium-Range dependency. Library 'math' used at line 129 is imported at line 13 and has a Long-Range dependency. Variable 'cluster1_asset_list' used at line 129 is defined at line 79 and has a Long-Range dependency.
{'Loop Body': 1, 'If Condition': 1}
{'Variable Medium-Range': 1, 'Library Long-Range': 1, 'Variable Long-Range': 1}
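The target condition compares the number of accepted pairs with math.comb(n, 2), the count of all unordered pairs of n items. A toy illustration; note the bound is loose for n > 2, since at most n // 2 disjoint pairs can ever be accepted, so the loop normally ends by exhausting sdd_list rather than via this break:

import math

n_assets = 5
print(math.comb(n_assets, 2))  # 10 possible unordered pairs
print(n_assets // 2)           # but at most 2 disjoint pairs can be selected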
infilling_python
Timeseries_Clustering
133
133
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]']
['opt_asset2=selected_stocks[1:len(selected_stocks):2]']
['', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'selected_stocks' used at line 133 is defined at line 114 and has a Medium-Range dependency.
{}
{'Variable Medium-Range': 1}
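The target line recovers one member of each accepted pair by slicing the interleaved selected_stocks list with a stride of 2 (assets were appended alternately: first of pair 1, second of pair 1, first of pair 2, and so on). A toy sketch of both slices:

selected = ['A', 'B', 'C', 'D']         # appended pairwise: (A, B), (C, D)
print(selected[0:len(selected) - 1:2])  # even indices -> ['A', 'C']
print(selected[1:len(selected):2])      # odd indices  -> ['B', 'D']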
infilling_python
Timeseries_Clustering
142
144
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '']
['for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)']
['', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DBSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 142}, {'reason_category': 'Loop Body', 'usage_line': 143}, {'reason_category': 'Loop Body', 'usage_line': 144}]
Variable 'pairs' used at line 142 is defined at line 88 and has a Long-Range dependency. Library 'pearsonr' used at line 143 is imported at line 16 and has a Long-Range dependency. Variable 'clusters_norm_ret_df' used at line 143 is defined at line 80 and has a Long-Range dependency. Variable 'pairs' used at line 143 is defined at line 88 and has a Long-Range dependency. Variable 'i' used at line 143 is part of a Loop defined at line 142 and has a Short-Range dependency. Variable 'pearson_corr_list' used at line 144 is defined at line 140 and has a Short-Range dependency. Variable 'corr' used at line 144 is defined at line 143 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 2}
{'Variable Long-Range': 3, 'Library Long-Range': 1, 'Variable Loop Short-Range': 1, 'Variable Short-Range': 2}
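The target loop calls scipy.stats.pearsonr on each column pair and keeps index 0, the correlation coefficient (index 1 would be the two-sided p-value). A self-contained sketch on synthetic series; the 0.5 slope and noise scale are arbitrary choices:

import numpy as np
from scipy.stats import pearsonr

rng = np.random.default_rng(0)
x = rng.normal(size=100)
y = 0.5 * x + rng.normal(scale=0.5, size=100)  # correlated with x by construction
r = pearsonr(x, y)[0]  # keep the coefficient, drop the p-value
print(r)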
infilling_python
Timeseries_Clustering
166
169
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):']
[' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)']
['', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DBSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 166}, {'reason_category': 'If Body', 'usage_line': 166}, {'reason_category': 'Loop Body', 'usage_line': 167}, {'reason_category': 'If Body', 'usage_line': 167}, {'reason_category': 'Loop Body', 'usage_line': 168}, {'reason_category': 'If Body', 'usage_line': 168}, {'reason_category': 'Loop Body', 'usage_line': 169}, {'reason_category': 'If Body', 'usage_line': 169}]
Variable 'selected_stocks' used at line 166 is defined at line 156 and has a Short-Range dependency. Variable 's1' used at line 166 is defined at line 162 and has a Short-Range dependency. Variable 'selected_stocks' used at line 167 is defined at line 156 and has a Medium-Range dependency. Variable 's2' used at line 167 is defined at line 163 and has a Short-Range dependency. Variable 's1' used at line 168 is defined at line 162 and has a Short-Range dependency. Variable 's2' used at line 168 is defined at line 163 and has a Short-Range dependency. Variable 'selected_pairs_corr' used at line 169 is defined at line 157 and has a Medium-Range dependency. Variable 'pair' used at line 169 is defined at line 168 and has a Short-Range dependency.
{'Loop Body': 4, 'If Body': 4}
{'Variable Short-Range': 6, 'Variable Medium-Range': 2}
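The target lines repeat the greedy unique-pair pass for the correlation strategy. One thing to notice: sort_corr_list is sorted ascending, so the pass starts from the least correlated pairs; if the most correlated pairs were the goal, a descending sort would be used instead. A hypothetical variant on toy values:

corr_pairs = [(('A', 'B'), 0.9), (('A', 'C'), 0.2), (('C', 'D'), 0.7)]
# reverse=True puts the most correlated pair first (a variant, not the dataset's code)
corr_pairs.sort(key=lambda x: x[1], reverse=True)
print(corr_pairs[0])  # (('A', 'B'), 0.9)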
infilling_python
Timeseries_Clustering
175
175
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]']
['opt_asset2=selected_stocks[1:len(selected_stocks):2]']
['', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'selected_stocks' used at line 175 is defined at line 156 and has a Medium-Range dependency.
{}
{'Variable Medium-Range': 1}
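The infilled line in this record splits the flat selected_stocks list into the second leg of each pair by stride slicing. A minimal standalone sketch of that idiom — greedy unique-pair selection followed by even/odd slicing — with illustrative names not taken from the dataset:

# Pairs pre-sorted by ascending distance, as in the record's sdd_list.
ranked_pairs = [(("A", "B"), 2.0), (("A", "C"), 3.0), (("C", "D"), 4.0)]

selected = []        # flat list: [leg1, leg2, leg1, leg2, ...]
unique_pairs = []
for (s1, s2), _dist in ranked_pairs:
    if s1 not in selected and s2 not in selected:   # no asset reused
        selected.extend([s1, s2])
        unique_pairs.append((s1, s2))

leg1 = selected[0::2]   # every even index -> first asset of each pair
leg2 = selected[1::2]   # every odd index  -> second asset of each pair
print(unique_pairs)     # [('A', 'B'), ('C', 'D')]
print(leg1, leg2)       # ['A', 'C'] ['B', 'D']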
infilling_python
Timeseries_Clustering
183
186
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):']
[' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])']
['print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 183}, {'reason_category': 'Loop Body', 'usage_line': 184}, {'reason_category': 'Loop Body', 'usage_line': 185}, {'reason_category': 'If Condition', 'usage_line': 185}, {'reason_category': 'Loop Body', 'usage_line': 186}, {'reason_category': 'If Body', 'usage_line': 186}]
Library 'coint' used at line 183 is imported at line 17 and has a Long-Range dependency. Library 'np' used at line 183 is imported at line 2 and has a Long-Range dependency. Variable 'clusters_norm_ret_df' used at line 183 is defined at line 80 and has a Long-Range dependency. Variable 'pairs' used at line 183 is defined at line 88 and has a Long-Range dependency. Variable 'i' used at line 183 is part of a Loop defined at line 182 and has a Short-Range dependency. Variable 'pvalue' used at line 185 is defined at line 183 and has a Short-Range dependency. Variable 'confidence_level' used at line 185 is defined at line 184 and has a Short-Range dependency. Variable 'coint_pairs' used at line 186 is defined at line 180 and has a Short-Range dependency. Variable 'pairs' used at line 186 is defined at line 88 and has a Long-Range dependency. Variable 'i' used at line 186 is part of a Loop defined at line 182 and has a Short-Range dependency.
{'Loop Body': 4, 'If Condition': 1, 'If Body': 1}
{'Library Long-Range': 2, 'Variable Long-Range': 3, 'Variable Loop Short-Range': 2, 'Variable Short-Range': 3}
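The infilled block screens pairs with statsmodels' Engle-Granger cointegration test. A self-contained sketch of the same coint call on synthetic random-walk series (series names and the shared-trend construction are made up for the example; coint returns the test statistic, the p-value, and critical values):

import numpy as np
from statsmodels.tsa.stattools import coint

rng = np.random.default_rng(107)
common = rng.normal(size=500).cumsum()           # shared stochastic trend
y0 = common + rng.normal(scale=0.5, size=500)    # two series driven by it
y1 = common + rng.normal(scale=0.5, size=500)

t_stat, pvalue, crit_values = coint(y0, y1)
if pvalue < 0.05:                                # same threshold as the record
    print("cointegrated, p =", round(pvalue, 4))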
infilling_python
Timeseries_Clustering
183
183
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):']
[' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))']
[' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 183}]
Library 'coint' used at line 183 is imported at line 17 and has a Long-Range dependency. Library 'np' used at line 183 is imported at line 2 and has a Long-Range dependency. Variable 'clusters_norm_ret_df' used at line 183 is defined at line 80 and has a Long-Range dependency. Variable 'pairs' used at line 183 is defined at line 88 and has a Long-Range dependency. Variable 'i' used at line 183 is part of a Loop defined at line 182 and has a Short-Range dependency.
{'Loop Body': 1}
{'Library Long-Range': 2, 'Variable Long-Range': 2, 'Variable Loop Short-Range': 1}
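The Short-Range / Medium-Range / Long-Range labels attached to each def/use pair in these records are a function of the line gap between definition and use. The exact cutoffs are not stated in this section, so the thresholds below (short at most 10 lines, medium at most 30) are assumptions chosen only because they reproduce every label shown here:

def classify_dependency(def_line: int, use_line: int) -> str:
    """Bucket a def/use pair by line distance (thresholds assumed)."""
    gap = use_line - def_line
    if gap <= 10:
        return "Short-Range"
    if gap <= 30:
        return "Medium-Range"
    return "Long-Range"

# e.g. the coint import at line 17 used at line 183 -> Long-Range
print(classify_dependency(17, 183))    # Long-Range
print(classify_dependency(156, 175))   # Medium-Range, as in the earlier record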
infilling_python
Timeseries_Clustering
186
186
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:']
[' coint_pairs.append(pairs[i])']
['print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 186}, {'reason_category': 'If Body', 'usage_line': 186}]
Variable 'coint_pairs' used at line 186 is defined at line 180 and has a Short-Range dependency. Variable 'pairs' used at line 186 is defined at line 88 and has a Long-Range dependency. Variable 'i' used at line 186 is part of a Loop defined at line 182 and has a Short-Range dependency.
{'Loop Body': 1, 'If Body': 1}
{'Variable Short-Range': 1, 'Variable Long-Range': 1, 'Variable Loop Short-Range': 1}
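Reason categories such as 'Loop Body' and 'If Body' mark the syntactic context of each infilled line; note line 186 above carries both tags at once. A rough sketch of recovering such tags with Python's ast module (simplified — it skips 'If Condition' and the dataset's real tagger is not shown here):

import ast

src = """\
coint_pairs = []
for i in range(3):
    pvalue = 0.01
    if pvalue < 0.05:
        coint_pairs.append(i)
"""

tree = ast.parse(src)
tags = {}   # line number -> set of context tags
for node in ast.walk(tree):
    if isinstance(node, (ast.For, ast.While)):
        for stmt in node.body:
            for line in range(stmt.lineno, stmt.end_lineno + 1):
                tags.setdefault(line, set()).add("Loop Body")
    if isinstance(node, ast.If):
        for stmt in node.body:
            for line in range(stmt.lineno, stmt.end_lineno + 1):
                tags.setdefault(line, set()).add("If Body")

print(sorted(tags.items()))   # line 5 gets both 'Loop Body' and 'If Body'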
infilling_python
Image_Transformation
16
16
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)']
['B_array = np.array(B)']
["print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = 
image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 16 is imported at line 3 and has a Medium-Range dependency. Variable 'B' used at line 16 is defined at line 14 and has a Short-Range dependency.
{}
{'Library Medium-Range': 1, 'Variable Short-Range': 1}
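This record infills the PIL-to-NumPy conversion. A tiny self-contained round trip showing both directions (no image file needed; the array is synthetic):

import numpy as np
from PIL import Image

arr = np.zeros((4, 4, 3), dtype=np.uint8)   # HxWxC, uint8 as PIL expects
arr[..., 0] = 255                           # solid red block

img = Image.fromarray(arr)                  # NumPy -> PIL
back = np.array(img)                        # PIL -> NumPy, as in the record
print(back.shape, back.dtype)               # (4, 4, 3) uint8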
infilling_python
Image_Transformation
23
23
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '']
['if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:']
[" print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' 
return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Condition', 'usage_line': 23}]
Variable 'end_size' used at line 23 is defined at line 21 and has a Short-Range dependency. Variable 'A_array' used at line 23 is defined at line 15 and has a Short-Range dependency. Variable 'B_array' used at line 23 is defined at line 16 and has a Short-Range dependency.
{'If Condition': 1}
{'Variable Short-Range': 3}
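The guard infilled here is paired, in the surrounding payload, with np.min(A_array.shape, B_array.shape), which passes the second shape tuple as np.min's axis argument and would raise at runtime if the branch were ever taken. A standalone corrected guard might look like the following (array names reused from the payload for readability, shapes synthetic):

import numpy as np

A_array = np.zeros((300, 400, 3), dtype=np.uint8)
B_array = np.zeros((280, 500, 3), dtype=np.uint8)
end_size = 256

if end_size >= A_array.shape[0] or end_size >= B_array.shape[0]:
    # min over plain ints, instead of np.min with a tuple as axis
    print('choose end size less than:', min(A_array.shape[0], B_array.shape[0]))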
infilling_python
Image_Transformation
26
26
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '']
['A_resized = cv2.resize(A_array, (end_size, end_size))']
['', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = 
np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'cv2' used at line 26 is imported at line 2 and has a Medium-Range dependency. Variable 'A_array' used at line 26 is defined at line 15 and has a Medium-Range dependency. Variable 'end_size' used at line 26 is defined at line 21 and has a Short-Range dependency.
{}
{'Library Medium-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}
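cv2.resize takes dsize as (width, height), the reverse of NumPy's (rows, cols) shape order; with the record's square end_size the call is safe either way, but the convention bites on rectangles. A quick sketch making it visible (synthetic image, nothing read from disk):

import cv2
import numpy as np

img = np.zeros((192, 256, 3), dtype=np.uint8)   # 192 rows x 256 cols
resized = cv2.resize(img, (64, 32))             # dsize = (width, height)
print(resized.shape)                            # (32, 64, 3): rows x cols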
infilling_python
Image_Transformation
29
36
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):']
[' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized']
['B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' 
output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Variable 'img_array' used at line 29 is defined at line 28 and has a Short-Range dependency. Variable 'end_size' used at line 29 is defined at line 28 and has a Short-Range dependency. Variable 'x_start' used at line 30 is defined at line 29 and has a Short-Range dependency. Variable 'end_size' used at line 30 is defined at line 28 and has a Short-Range dependency. Variable 'img_array' used at line 32 is defined at line 28 and has a Short-Range dependency. Variable 'end_size' used at line 32 is defined at line 28 and has a Short-Range dependency. Variable 'y_start' used at line 33 is defined at line 32 and has a Short-Range dependency. Variable 'end_size' used at line 33 is defined at line 28 and has a Short-Range dependency. Variable 'img_array' used at line 35 is defined at line 28 and has a Short-Range dependency. Variable 'x_start' used at line 35 is defined at line 29 and has a Short-Range dependency. Variable 'x_end' used at line 35 is defined at line 30 and has a Short-Range dependency. Variable 'y_start' used at line 35 is defined at line 32 and has a Short-Range dependency. Variable 'y_end' used at line 35 is defined at line 33 and has a Short-Range dependency. Variable 'img_resized' used at line 36 is defined at line 35 and has a Short-Range dependency.
{}
{'Variable Short-Range': 14}
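The center_crop body infilled in this record works as written for images at least end_size pixels on each side; note that int((h - e) / 2) equals (h - e) // 2 whenever h >= e. A compact standalone version with the same arithmetic plus a bounds check (the assertion is an addition for the sketch, not part of the dataset code):

import numpy as np

def center_crop(img_array, end_size):
    assert min(img_array.shape[:2]) >= end_size, "image smaller than crop"
    x_start = (img_array.shape[0] - end_size) // 2
    y_start = (img_array.shape[1] - end_size) // 2
    return img_array[x_start:x_start + end_size,
                     y_start:y_start + end_size, :]

img = np.arange(5 * 6 * 3).reshape(5, 6, 3)
print(center_crop(img, 3).shape)   # (3, 3, 3)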
infilling_python
Image_Transformation
32
33
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '']
[' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size']
['', '    img_resized = img_array[x_start:x_end, y_start:y_end, :]', '    return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', '    if row % 2 == 0:', '        D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', '    else:', '        D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', '    translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', '    return translation_matrix', '', 'def rotation(angle,radians = True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '    rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', '    return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '', '    similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', '                                 [-1*scale_factor*sintheta, scale_factor*costheta, dy],', '                                 [0,0,1]])', '    return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', '    scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', '    shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', '    result = np.array([[0,0,0], [0,0,0], [0,0,0]])', '    result = np.dot(translation(x, y), rotation(angle))', '    result = np.dot(result, scaling)', '    result = np.dot(result, shear)', '    return result', '', 'def bilinear_interpolation(image,x,y):', '    x1 = int(x)', '    x2 = x1 + 1', '    y1 = int(y)', '    y2 = y1 + 1', '', '    if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', '        return 0', '    else:', '        f11 = image[y1][x1]', '        f12 = image[y1][x2]', '        f21 = image[y2][x1]', '        f22 = image[y2][x2]', '', '        w1 = (x2-x)*(y2-y)', '        w2 = (x-x1)*(y2-y)', '        w3 = (x2-x)*(y-y1)', '        w4 = (x-x1)*(y-y1)', '', '        return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', '    rows,cols = I.shape[:2]', '    output = np.zeros((rows,cols,3))', '    center = (cols/2, rows/2)', '    T_invert = np.linalg.inv(T)', '', '    for i in range(rows):', '        for j in range(cols):', '            shift_center = np.array([j-center[0],i -center[1],1])', '            coordinates = np.dot(T_invert,shift_center)', '            x,y = coordinates[0] + center[0], coordinates [1] + center[1]', '            output[i][j] = bilinear_interpolation(I,x,y)', '    output = np.array(output, np.uint8)', '    return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Variable 'img_array' used at line 32 is defined at line 28 and has a Short-Range dependency. Variable 'end_size' used at line 32 is defined at line 28 and has a Short-Range dependency. Variable 'y_start' used at line 33 is defined at line 32 and has a Short-Range dependency. Variable 'end_size' used at line 33 is defined at line 28 and has a Short-Range dependency.
{}
{'Variable Short-Range': 4}
infilling_python
Image_Transformation
35
35
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '']
[' img_resized = img_array[x_start:x_end, y_start:y_end, :]']
['    return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', '    if row % 2 == 0:', '        D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', '    else:', '        D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', '    translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', '    return translation_matrix', '', 'def rotation(angle,radians = True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '    rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', '    return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '', '    similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', '                                 [-1*scale_factor*sintheta, scale_factor*costheta, dy],', '                                 [0,0,1]])', '    return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', '    scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', '    shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', '    result = np.array([[0,0,0], [0,0,0], [0,0,0]])', '    result = np.dot(translation(x, y), rotation(angle))', '    result = np.dot(result, scaling)', '    result = np.dot(result, shear)', '    return result', '', 'def bilinear_interpolation(image,x,y):', '    x1 = int(x)', '    x2 = x1 + 1', '    y1 = int(y)', '    y2 = y1 + 1', '', '    if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', '        return 0', '    else:', '        f11 = image[y1][x1]', '        f12 = image[y1][x2]', '        f21 = image[y2][x1]', '        f22 = image[y2][x2]', '', '        w1 = (x2-x)*(y2-y)', '        w2 = (x-x1)*(y2-y)', '        w3 = (x2-x)*(y-y1)', '        w4 = (x-x1)*(y-y1)', '', '        return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', '    rows,cols = I.shape[:2]', '    output = np.zeros((rows,cols,3))', '    center = (cols/2, rows/2)', '    T_invert = np.linalg.inv(T)', '', '    for i in range(rows):', '        for j in range(cols):', '            shift_center = np.array([j-center[0],i -center[1],1])', '            coordinates = np.dot(T_invert,shift_center)', '            x,y = coordinates[0] + center[0], coordinates [1] + center[1]', '            output[i][j] = bilinear_interpolation(I,x,y)', '    output = np.array(output, np.uint8)', '    return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Variable 'img_array' used at line 35 is defined at line 28 and has a Short-Range dependency. Variable 'x_start' used at line 35 is defined at line 29 and has a Short-Range dependency. Variable 'x_end' used at line 35 is defined at line 30 and has a Short-Range dependency. Variable 'y_start' used at line 35 is defined at line 32 and has a Short-Range dependency. Variable 'y_end' used at line 35 is defined at line 33 and has a Short-Range dependency.
{}
{'Variable Short-Range': 5}
infilling_python
Image_Transformation
42
46
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]']
['for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)']
['print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', '    translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', '    return translation_matrix', '', 'def rotation(angle,radians = True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '    rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', '    return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '', '    similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', '                                 [-1*scale_factor*sintheta, scale_factor*costheta, dy],', '                                 [0,0,1]])', '    return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', '    scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', '    shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', '    result = np.array([[0,0,0], [0,0,0], [0,0,0]])', '    result = np.dot(translation(x, y), rotation(angle))', '    result = np.dot(result, scaling)', '    result = np.dot(result, shear)', '    return result', '', 'def bilinear_interpolation(image,x,y):', '    x1 = int(x)', '    x2 = x1 + 1', '    y1 = int(y)', '    y2 = y1 + 1', '', '    if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', '        return 0', '    else:', '        f11 = image[y1][x1]', '        f12 = image[y1][x2]', '        f21 = image[y2][x1]', '        f22 = image[y2][x2]', '', '        w1 = (x2-x)*(y2-y)', '        w2 = (x-x1)*(y2-y)', '        w3 = (x2-x)*(y-y1)', '        w4 = (x-x1)*(y-y1)', '', '        return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', '    rows,cols = I.shape[:2]', '    output = np.zeros((rows,cols,3))', '    center = (cols/2, rows/2)', '    T_invert = np.linalg.inv(T)', '', '    for i in range(rows):', '        for j in range(cols):', '            shift_center = np.array([j-center[0],i -center[1],1])', '            coordinates = np.dot(T_invert,shift_center)', '            x,y = coordinates[0] + center[0], coordinates [1] + center[1]', '            output[i][j] = bilinear_interpolation(I,x,y)', '    output = np.array(output, np.uint8)', '    return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 42}, {'reason_category': 'Loop Body', 'usage_line': 43}, {'reason_category': 'If Condition', 'usage_line': 43}, {'reason_category': 'If Body', 'usage_line': 44}, {'reason_category': 'Loop Body', 'usage_line': 44}, {'reason_category': 'Loop Body', 'usage_line': 45}, {'reason_category': 'Else Reasoning', 'usage_line': 45}, {'reason_category': 'Loop Body', 'usage_line': 46}, {'reason_category': 'Else Reasoning', 'usage_line': 46}]
Variable 'A_resized' used at line 42 is defined at line 26 and has a Medium-Range dependency. Variable 'row' used at line 43 is part of a Loop defined at line 42 and has a Short-Range dependency. Library 'np' used at line 44 is imported at line 3 and has a Long-Range dependency. Variable 'B_resized' used at line 44 is defined at line 37 and has a Short-Range dependency. Variable 'row' used at line 44 is part of a Loop defined at line 42 and has a Short-Range dependency. Library 'np' used at line 46 is imported at line 3 and has a Long-Range dependency. Variable 'A_resized' used at line 46 is defined at line 26 and has a Medium-Range dependency. Variable 'row' used at line 46 is part of a Loop defined at line 42 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 4, 'If Condition': 1, 'If Body': 1, 'Else Reasoning': 2}
{'Variable Medium-Range': 2, 'Variable Loop Short-Range': 3, 'Library Long-Range': 2, 'Variable Short-Range': 1}
infilling_python
Image_Transformation
43
46
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):']
[' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)']
['print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', '    translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', '    return translation_matrix', '', 'def rotation(angle,radians = True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '    rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', '    return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '', '    similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', '                                 [-1*scale_factor*sintheta, scale_factor*costheta, dy],', '                                 [0,0,1]])', '    return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', '    scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', '    shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', '    result = np.array([[0,0,0], [0,0,0], [0,0,0]])', '    result = np.dot(translation(x, y), rotation(angle))', '    result = np.dot(result, scaling)', '    result = np.dot(result, shear)', '    return result', '', 'def bilinear_interpolation(image,x,y):', '    x1 = int(x)', '    x2 = x1 + 1', '    y1 = int(y)', '    y2 = y1 + 1', '', '    if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', '        return 0', '    else:', '        f11 = image[y1][x1]', '        f12 = image[y1][x2]', '        f21 = image[y2][x1]', '        f22 = image[y2][x2]', '', '        w1 = (x2-x)*(y2-y)', '        w2 = (x-x1)*(y2-y)', '        w3 = (x2-x)*(y-y1)', '        w4 = (x-x1)*(y-y1)', '', '        return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', '    rows,cols = I.shape[:2]', '    output = np.zeros((rows,cols,3))', '    center = (cols/2, rows/2)', '    T_invert = np.linalg.inv(T)', '', '    for i in range(rows):', '        for j in range(cols):', '            shift_center = np.array([j-center[0],i -center[1],1])', '            coordinates = np.dot(T_invert,shift_center)', '            x,y = coordinates[0] + center[0], coordinates [1] + center[1]', '            output[i][j] = bilinear_interpolation(I,x,y)', '    output = np.array(output, np.uint8)', '    return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'Loop Body', 'usage_line': 43}, {'reason_category': 'If Condition', 'usage_line': 43}, {'reason_category': 'If Body', 'usage_line': 44}, {'reason_category': 'Loop Body', 'usage_line': 44}, {'reason_category': 'Loop Body', 'usage_line': 45}, {'reason_category': 'Else Reasoning', 'usage_line': 45}, {'reason_category': 'Loop Body', 'usage_line': 46}, {'reason_category': 'Else Reasoning', 'usage_line': 46}]
Variable 'row' used at line 43 is part of a Loop defined at line 42 and has a Short-Range dependency. Library 'np' used at line 44 is imported at line 3 and has a Long-Range dependency. Variable 'B_resized' used at line 44 is defined at line 37 and has a Short-Range dependency. Variable 'row' used at line 44 is part of a Loop defined at line 42 and has a Short-Range dependency. Library 'np' used at line 46 is imported at line 3 and has a Long-Range dependency. Variable 'A_resized' used at line 46 is defined at line 26 and has a Medium-Range dependency. Variable 'row' used at line 46 is part of a Loop defined at line 42 and has a Short-Range dependency.
{'Loop Body': 4, 'If Condition': 1, 'If Body': 1, 'Else Reasoning': 2}
{'Variable Loop Short-Range': 3, 'Library Long-Range': 2, 'Variable Short-Range': 1, 'Variable Medium-Range': 1}
infilling_python
Image_Transformation
43
44
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):']
[' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)']
['    else:', '        D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', '    translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', '    return translation_matrix', '', 'def rotation(angle,radians = True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '    rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', '    return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '', '    similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', '                                 [-1*scale_factor*sintheta, scale_factor*costheta, dy],', '                                 [0,0,1]])', '    return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', '    scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', '    shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', '    result = np.array([[0,0,0], [0,0,0], [0,0,0]])', '    result = np.dot(translation(x, y), rotation(angle))', '    result = np.dot(result, scaling)', '    result = np.dot(result, shear)', '    return result', '', 'def bilinear_interpolation(image,x,y):', '    x1 = int(x)', '    x2 = x1 + 1', '    y1 = int(y)', '    y2 = y1 + 1', '', '    if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', '        return 0', '    else:', '        f11 = image[y1][x1]', '        f12 = image[y1][x2]', '        f21 = image[y2][x1]', '        f22 = image[y2][x2]', '', '        w1 = (x2-x)*(y2-y)', '        w2 = (x-x1)*(y2-y)', '        w3 = (x2-x)*(y-y1)', '        w4 = (x-x1)*(y-y1)', '', '        return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', '    rows,cols = I.shape[:2]', '    output = np.zeros((rows,cols,3))', '    center = (cols/2, rows/2)', '    T_invert = np.linalg.inv(T)', '', '    for i in range(rows):', '        for j in range(cols):', '            shift_center = np.array([j-center[0],i -center[1],1])', '            coordinates = np.dot(T_invert,shift_center)', '            x,y = coordinates[0] + center[0], coordinates [1] + center[1]', '            output[i][j] = bilinear_interpolation(I,x,y)', '    output = np.array(output, np.uint8)', '    return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'Loop Body', 'usage_line': 43}, {'reason_category': 'If Condition', 'usage_line': 43}, {'reason_category': 'If Body', 'usage_line': 44}, {'reason_category': 'Loop Body', 'usage_line': 44}]
Variable 'row' used at line 43 is part of a Loop defined at line 42 and has a Short-Range dependency. Library 'np' used at line 44 is imported at line 3 and has a Long-Range dependency. Variable 'B_resized' used at line 44 is defined at line 37 and has a Short-Range dependency. Variable 'row' used at line 44 is part of a Loop defined at line 42 and has a Short-Range dependency.
{'Loop Body': 2, 'If Condition': 1, 'If Body': 1}
{'Variable Loop Short-Range': 2, 'Library Long-Range': 1, 'Variable Short-Range': 1}
infilling_python
Image_Transformation
46
46
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:']
[' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)']
['print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', '    translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', '    return translation_matrix', '', 'def rotation(angle,radians = True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '    rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', '    return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '', '    similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', '                                 [-1*scale_factor*sintheta, scale_factor*costheta, dy],', '                                 [0,0,1]])', '    return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', '    scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', '    shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', '    result = np.array([[0,0,0], [0,0,0], [0,0,0]])', '    result = np.dot(translation(x, y), rotation(angle))', '    result = np.dot(result, scaling)', '    result = np.dot(result, shear)', '    return result', '', 'def bilinear_interpolation(image,x,y):', '    x1 = int(x)', '    x2 = x1 + 1', '    y1 = int(y)', '    y2 = y1 + 1', '', '    if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', '        return 0', '    else:', '        f11 = image[y1][x1]', '        f12 = image[y1][x2]', '        f21 = image[y2][x1]', '        f22 = image[y2][x2]', '', '        w1 = (x2-x)*(y2-y)', '        w2 = (x-x1)*(y2-y)', '        w3 = (x2-x)*(y-y1)', '        w4 = (x-x1)*(y-y1)', '', '        return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', '    rows,cols = I.shape[:2]', '    output = np.zeros((rows,cols,3))', '    center = (cols/2, rows/2)', '    T_invert = np.linalg.inv(T)', '', '    for i in range(rows):', '        for j in range(cols):', '            shift_center = np.array([j-center[0],i -center[1],1])', '            coordinates = np.dot(T_invert,shift_center)', '            x,y = coordinates[0] + center[0], coordinates [1] + center[1]', '            output[i][j] = bilinear_interpolation(I,x,y)', '    output = np.array(output, np.uint8)', '    return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'Loop Body', 'usage_line': 46}, {'reason_category': 'Else Reasoning', 'usage_line': 46}]
Library 'np' used at line 46 is imported at line 3 and has a Long-Range dependency. Variable 'A_resized' used at line 46 is defined at line 26 and has a Medium-Range dependency. Variable 'row' used at line 46 is part of a Loop defined at line 42 and has a Short-Range dependency.
{'Loop Body': 1, 'Else Reasoning': 1}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Loop Short-Range': 1}
infilling_python
Image_Transformation
58
58
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)']
['mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)']
['', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', '    translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', '    return translation_matrix', '', 'def rotation(angle,radians = True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '    rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', '    return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '', '    similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', '                                 [-1*scale_factor*sintheta, scale_factor*costheta, dy],', '                                 [0,0,1]])', '    return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', '    scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', '    shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', '    result = np.array([[0,0,0], [0,0,0], [0,0,0]])', '    result = np.dot(translation(x, y), rotation(angle))', '    result = np.dot(result, scaling)', '    result = np.dot(result, shear)', '    return result', '', 'def bilinear_interpolation(image,x,y):', '    x1 = int(x)', '    x2 = x1 + 1', '    y1 = int(y)', '    y2 = y1 + 1', '', '    if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', '        return 0', '    else:', '        f11 = image[y1][x1]', '        f12 = image[y1][x2]', '        f21 = image[y2][x1]', '        f22 = image[y2][x2]', '', '        w1 = (x2-x)*(y2-y)', '        w2 = (x-x1)*(y2-y)', '        w3 = (x2-x)*(y-y1)', '        w4 = (x-x1)*(y-y1)', '', '        return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', '    rows,cols = I.shape[:2]', '    output = np.zeros((rows,cols,3))', '    center = (cols/2, rows/2)', '    T_invert = np.linalg.inv(T)', '', '    for i in range(rows):', '        for j in range(cols):', '            shift_center = np.array([j-center[0],i -center[1],1])', '            coordinates = np.dot(T_invert,shift_center)', '            x,y = coordinates[0] + center[0], coordinates [1] + center[1]', '            output[i][j] = bilinear_interpolation(I,x,y)', '    output = np.array(output, np.uint8)', '    return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 58 is imported at line 3 and has a Long-Range dependency. Variable 'pepper' used at line 58 is defined at line 54 and has a Short-Range dependency. Variable 'lower_yellow' used at line 58 is defined at line 56 and has a Short-Range dependency. Variable 'upper_yellow' used at line 58 is defined at line 57 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
infilling_python
Image_Transformation
60
60
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '']
['result = np.where(mask, 1, 0)']
['', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', '    translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', '    return translation_matrix', '', 'def rotation(angle,radians = True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '    rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', '    return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', '    if radians == False:', '        angle = np.radians(angle)', '    costheta = np.cos(angle)', '    sintheta = np.sin(angle)', '', '    similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', '                                 [-1*scale_factor*sintheta, scale_factor*costheta, dy],', '                                 [0,0,1]])', '    return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', '    scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', '    shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', '    result = np.array([[0,0,0], [0,0,0], [0,0,0]])', '    result = np.dot(translation(x, y), rotation(angle))', '    result = np.dot(result, scaling)', '    result = np.dot(result, shear)', '    return result', '', 'def bilinear_interpolation(image,x,y):', '    x1 = int(x)', '    x2 = x1 + 1', '    y1 = int(y)', '    y2 = y1 + 1', '', '    if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', '        return 0', '    else:', '        f11 = image[y1][x1]', '        f12 = image[y1][x2]', '        f21 = image[y2][x1]', '        f22 = image[y2][x2]', '', '        w1 = (x2-x)*(y2-y)', '        w2 = (x-x1)*(y2-y)', '        w3 = (x2-x)*(y-y1)', '        w4 = (x-x1)*(y-y1)', '', '        return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', '    rows,cols = I.shape[:2]', '    output = np.zeros((rows,cols,3))', '    center = (cols/2, rows/2)', '    T_invert = np.linalg.inv(T)', '', '    for i in range(rows):', '        for j in range(cols):', '            shift_center = np.array([j-center[0],i -center[1],1])', '            coordinates = np.dot(T_invert,shift_center)', '            x,y = coordinates[0] + center[0], coordinates [1] + center[1]', '            output[i][j] = bilinear_interpolation(I,x,y)', '    output = np.array(output, np.uint8)', '    return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 60 is imported at line 3 and has a Long-Range dependency. Variable 'mask' used at line 60 is defined at line 58 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
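The span this record targets builds a binary mask with np.where after a channel-wise range test. A minimal standalone sketch of that step, using a toy 2x2 RGB array (the pixel values here are illustrative, not taken from the dataset's pepper image):

import numpy as np

# Toy 2x2 RGB image; two pixels fall inside the yellow range, two do not.
img = np.array([[[200, 200, 100], [10, 10, 10]],
                [[255, 175, 0], [0, 0, 255]]], dtype=np.uint8)
lower = np.array([150, 175, 0], dtype=np.uint8)
upper = np.array([255, 255, 150], dtype=np.uint8)

# A pixel matches only if every channel lies in [lower, upper]; np.all
# reduces the per-channel test over the last (channel) axis.
mask = np.all((img >= lower) & (img <= upper), axis=-1)
binary = np.where(mask, 1, 0)  # 1 where yellow, 0 elsewhere
print(binary)  # [[1 0]
               #  [1 0]]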
infilling_python
Image_Transformation
70
72
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)']
['mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)']
['', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 
'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 70 is imported at line 3 and has a Long-Range dependency. Variable 'hsv_img' used at line 70 is defined at line 66 and has a Short-Range dependency. Variable 'lower_yellow' used at line 70 is defined at line 68 and has a Short-Range dependency. Variable 'upper_yellow' used at line 70 is defined at line 69 and has a Short-Range dependency. Library 'np' used at line 72 is imported at line 3 and has a Long-Range dependency. Variable 'mask' used at line 72 is defined at line 70 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 4}
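The infilled span above repeats the range test in HSV space. A small sketch, assuming OpenCV's 8-bit convention (hue scaled to 0-179); the solid-color patch is invented for illustration, and cv2.inRange is shown as the built-in equivalent of the manual np.all comparison:

import cv2
import numpy as np

# Solid-colour BGR patch (OpenCV reads images as BGR by default).
patch = np.full((2, 2, 3), (0, 220, 220), dtype=np.uint8)  # B, G, R -> yellow
hsv = cv2.cvtColor(patch, cv2.COLOR_BGR2HSV)
print(hsv[0, 0])  # hue of pure yellow is 30 on OpenCV's 0-179 hue scale

# cv2.inRange returns 255 where every channel lies inside [lower, upper],
# and 0 elsewhere - the library form of the record's np.all test.
lower = np.array([19, 0, 0], dtype=np.uint8)
upper = np.array([24, 255, 255], dtype=np.uint8)
mask = cv2.inRange(hsv, lower, upper)
print(mask)  # all zeros here: hue 30 sits outside the narrow [19, 24] band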
infilling_python
Image_Transformation
83
84
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):']
[' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix']
['', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= 
'./desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 83 is imported at line 3 and has a Long-Range dependency. Variable 'dx' used at line 83 is defined at line 82 and has a Short-Range dependency. Variable 'dy' used at line 83 is defined at line 82 and has a Short-Range dependency. Variable 'translation_matrix' used at line 84 is defined at line 83 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
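The infilled span is the 3x3 homogeneous translation matrix. A quick sketch applying it to one hypothetical point to confirm it shifts coordinates by (dx, dy):

import numpy as np

def translation(dx, dy):
    # 3x3 homogeneous translation, as defined in the record above
    return np.array([[1, 0, dx], [0, 1, dy], [0, 0, 1]])

p = np.array([4, 7, 1])         # the point (4, 7) in homogeneous coordinates
print(translation(21, 25) @ p)  # [25 32  1]: shifted by (dx, dy)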
infilling_python
Image_Transformation
87
92
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):']
[' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix']
['', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = 
I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Condition', 'usage_line': 87}, {'reason_category': 'If Body', 'usage_line': 88}]
Variable 'radians' used at line 87 is defined at line 86 and has a Short-Range dependency. Library 'np' used at line 88 is imported at line 3 and has a Long-Range dependency. Library 'np' used at line 89 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 89 is defined at line 88 and has a Short-Range dependency. Library 'np' used at line 90 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 90 is defined at line 88 and has a Short-Range dependency. Library 'np' used at line 91 is imported at line 3 and has a Long-Range dependency. Variable 'costheta' used at line 91 is defined at line 89 and has a Short-Range dependency. Variable 'sintheta' used at line 91 is defined at line 90 and has a Short-Range dependency. Variable 'rotation_matrix' used at line 92 is defined at line 91 and has a Short-Range dependency.
{'If Condition': 1, 'If Body': 1}
{'Variable Short-Range': 6, 'Library Long-Range': 4}
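The infilled rotation function converts degrees to radians when radians=False. A short check, restating the record's matrix convention (+sin in the first row), that the degree and radian paths agree:

import numpy as np

def rotation(angle, radians=True):
    if not radians:
        angle = np.radians(angle)  # same conversion the infilled span performs
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])  # record's convention

# 30 degrees and pi/6 radians must yield the identical matrix
print(np.allclose(rotation(30, radians=False), rotation(np.pi / 6)))  # True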
infilling_python
Image_Transformation
89
92
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)']
[' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix']
['', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = 
I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 89 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 89 is defined at line 88 and has a Short-Range dependency. Library 'np' used at line 90 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 90 is defined at line 88 and has a Short-Range dependency. Library 'np' used at line 91 is imported at line 3 and has a Long-Range dependency. Variable 'costheta' used at line 91 is defined at line 89 and has a Short-Range dependency. Variable 'sintheta' used at line 91 is defined at line 90 and has a Short-Range dependency. Variable 'rotation_matrix' used at line 92 is defined at line 91 and has a Short-Range dependency.
{}
{'Library Long-Range': 3, 'Variable Short-Range': 5}
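Same rotation helper as the record above; this sketch verifies the defining inverse property, that rotating by theta and then by -theta returns the identity (the theta value is arbitrary):

import numpy as np

def rotation(angle):
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])

# Rotating by theta and then by -theta is a no-op
theta = np.pi / 5
print(np.allclose(rotation(theta) @ rotation(-theta), np.eye(3)))  # True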
infilling_python
Image_Transformation
91
92
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)']
[' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix']
['', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = 
I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 91 is imported at line 3 and has a Long-Range dependency. Variable 'costheta' used at line 91 is defined at line 89 and has a Short-Range dependency. Variable 'sintheta' used at line 91 is defined at line 90 and has a Short-Range dependency. Variable 'rotation_matrix' used at line 92 is defined at line 91 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
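The constructed rotation_matrix should be a proper rotation: orthogonal with determinant +1. A brief numeric check of both properties (the 30-degree angle is just an example):

import numpy as np

theta = np.radians(30)  # example angle
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])

# A proper rotation is orthogonal (R.T @ R = I) and has determinant +1
print(np.allclose(R.T @ R, np.eye(3)))    # True
print(np.isclose(np.linalg.det(R), 1.0))  # True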
infilling_python
Image_Transformation
95
103
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):']
[' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix']
['', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 
'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Condition', 'usage_line': 95}, {'reason_category': 'If Body', 'usage_line': 96}]
Variable 'radians' used at line 95 is defined at line 94 and has a Short-Range dependency. Library 'np' used at line 96 is imported at line 3 and has a Long-Range dependency. Library 'np' used at line 97 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 97 is defined at line 94 and has a Short-Range dependency. Library 'np' used at line 98 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 98 is defined at line 94 and has a Short-Range dependency. Library 'np' used at line 100 is imported at line 3 and has a Long-Range dependency. Variable 'scale_factor' used at line 100 is defined at line 94 and has a Short-Range dependency. Variable 'costheta' used at line 100 is defined at line 97 and has a Short-Range dependency. Variable 'sintheta' used at line 100 is defined at line 98 and has a Short-Range dependency. Variable 'dx' used at line 100 is defined at line 94 and has a Short-Range dependency. Variable 'scale_factor' used at line 101 is defined at line 94 and has a Short-Range dependency. Variable 'sintheta' used at line 101 is defined at line 98 and has a Short-Range dependency. Variable 'costheta' used at line 101 is defined at line 97 and has a Short-Range dependency. Variable 'dy' used at line 101 is defined at line 94 and has a Short-Range dependency. Variable 'similarity_matrix' used at line 103 is defined at line 100 and has a Short-Range dependency.
{'If Condition': 1, 'If Body': 1}
{'Variable Short-Range': 12, 'Library Long-Range': 4}
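The infilled similarity matrix combines uniform scaling, rotation, and translation in one 3x3 homogeneous matrix. A sketch, under the record's conventions, confirming it factors as translation @ scale @ rotation (the angle, scale factor, and offsets are illustrative):

import numpy as np

def translation(dx, dy):
    return np.array([[1, 0, dx], [0, 1, dy], [0, 0, 1]])

def rotation(angle):
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])

def similarity_matrix(angle, dx, dy, k):
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[k * c, k * s, dx], [-k * s, k * c, dy], [0, 0, 1]])

# The one-shot similarity matrix equals translate @ uniform-scale @ rotate
theta, k = np.radians(60), 0.5
scale = np.diag([k, k, 1.0])
composed = translation(3, 4) @ scale @ rotation(theta)
print(np.allclose(composed, similarity_matrix(theta, 3, 4, k)))  # True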
infilling_python
Image_Transformation
97
103
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)']
[' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix']
['', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 
'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 97 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 97 is defined at line 94 and has a Short-Range dependency. Library 'np' used at line 98 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 98 is defined at line 94 and has a Short-Range dependency. Library 'np' used at line 100 is imported at line 3 and has a Long-Range dependency. Variable 'scale_factor' used at line 100 is defined at line 94 and has a Short-Range dependency. Variable 'costheta' used at line 100 is defined at line 97 and has a Short-Range dependency. Variable 'sintheta' used at line 100 is defined at line 98 and has a Short-Range dependency. Variable 'dx' used at line 100 is defined at line 94 and has a Short-Range dependency. Variable 'scale_factor' used at line 101 is defined at line 94 and has a Short-Range dependency. Variable 'sintheta' used at line 101 is defined at line 98 and has a Short-Range dependency. Variable 'costheta' used at line 101 is defined at line 97 and has a Short-Range dependency. Variable 'dy' used at line 101 is defined at line 94 and has a Short-Range dependency. Variable 'similarity_matrix' used at line 103 is defined at line 100 and has a Short-Range dependency.
{}
{'Library Long-Range': 3, 'Variable Short-Range': 11}
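One more property worth noting for the similarity span: with zero translation it scales every distance from the origin by the scale factor. A minimal check on one hypothetical point:

import numpy as np

theta, k = np.radians(60), 0.5  # illustrative angle and scale
c, s = np.cos(theta), np.sin(theta)
S = np.array([[k * c, k * s, 0], [-k * s, k * c, 0], [0, 0, 1]])

# |(4, 3)| = 5, so its image should have length k * 5 = 2.5
q = S @ np.array([4.0, 3.0, 1.0])
print(np.hypot(q[0], q[1]))  # 2.5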