Dataset schema (per-column type and size statistics):

| Column | Type | Stats |
| --- | --- | --- |
| task_type | stringclasses | 4 values |
| code_task | stringclasses | 15 values |
| start_line | int64 | 4 to 1.79k |
| end_line | int64 | 4 to 1.8k |
| before | stringlengths | 79 to 76.1k |
| between | stringlengths | 17 to 806 |
| after | stringlengths | 2 to 72.6k |
| reason_categories_output | stringlengths | 2 to 2.24k |
| horizon_categories_output | stringlengths | 83 to 3.99k |
| reason_freq_analysis | stringclasses | 150 values |
| horizon_freq_analysis | stringlengths | 23 to 185 |
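The rows below are rendered flat, one field after another. A minimal sketch of reading one example programmatically, assuming the data is loadable with the `datasets` library; the dataset ID used here is a placeholder, not the real name:

```python
# Minimal sketch: load one example of this schema with the `datasets` library.
# "user/code-completion-eval" is a hypothetical placeholder ID.
from datasets import load_dataset

ds = load_dataset("user/code-completion-eval", split="train")  # hypothetical ID
row = ds[0]

# Each example is a fill-in-the-middle task: `before` and `after` hold the
# surrounding source lines, and `between` is the ground-truth completion for
# the span [start_line, end_line].
print(row["task_type"], row["code_task"], row["start_line"], row["end_line"])
print(row["between"])
```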
task_type: completion_python
code_task: Credit_Scoring_Fairness
start_line: 194
end_line: 194
before:
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')"]
between:
['rf_pred = rf.fit(x_train, y_train).predict(x_test)']
after:
['test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', 
"print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + 
confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", 
"test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 
'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
reason_categories_output:
[]
horizon_categories_output:
Variable 'rf' used at line 194 is defined at line 193 and has a Short-Range dependency. Variable 'x_train' used at line 194 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 194 is defined at line 39 and has a Long-Range dependency. Variable 'x_test' used at line 194 is defined at line 39 and has a Long-Range dependency.
reason_freq_analysis:
{}
horizon_freq_analysis:
{'Variable Short-Range': 1, 'Variable Long-Range': 3}
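The horizon_categories_output field labels each variable use as Short-Range or Long-Range depending on how far the use site sits from the definition site. A minimal sketch of that classification, assuming a 10-line cutoff (the dataset does not document its actual threshold); it reproduces the frequency summary above:

```python
# Sketch: derive Short-Range / Long-Range labels from the distance between a
# variable's use line and its definition line. The 10-line cutoff is an
# assumption, not a documented parameter of the dataset.
from collections import Counter

SHORT_RANGE_MAX = 10  # assumed boundary

def classify_dependency(use_line: int, def_line: int) -> str:
    distance = use_line - def_line
    return "Variable Short-Range" if distance <= SHORT_RANGE_MAX else "Variable Long-Range"

# (use, definition) line pairs taken from the horizon_categories_output above:
deps = [(194, 193), (194, 39), (194, 39), (194, 39)]
print(Counter(classify_dependency(u, d) for u, d in deps))
# -> Counter({'Variable Long-Range': 3, 'Variable Short-Range': 1})
```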
task_type: completion_python
code_task: Credit_Scoring_Fairness
start_line: 330
end_line: 330
before:
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] 
+confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = 
(test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') "]
between:
['svm_pred = svm.fit(x_train, y_train).predict(x_test)']
after:
['', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', 
TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + 
confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
reason_categories_output:
[]
horizon_categories_output:
Variable 'svm' used at line 330 is defined at line 329 and has a Short-Range dependency. Variable 'x_train' used at line 330 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 330 is defined at line 39 and has a Long-Range dependency. Variable 'x_test' used at line 330 is defined at line 39 and has a Long-Range dependency.
reason_freq_analysis:
{}
horizon_freq_analysis:
{'Variable Short-Range': 1, 'Variable Long-Range': 3}
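Both rows embed the same fairness-evaluation pattern: fit a model, slice the test set by marital status, and report accuracy, demographic parity, equalized opportunity (TPR), and equalized odds (FNR) from each group's confusion matrix. A hedged sketch of those per-group metrics follows. Note it is a cleaned-up variant, not the embedded script's exact computation: sklearn's confusion_matrix returns [[TN, FP], [FN, TP]], and demographic parity is expressed here as the predicted-positive rate rather than the TP + TN count the script prints.

```python
# Sketch: per-group fairness metrics from a binary confusion matrix,
# following sklearn's [[TN, FP], [FN, TP]] layout (TP is cm[1, 1]).
from sklearn.metrics import confusion_matrix

def group_metrics(y_true, y_pred):
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    tn, fp, fn, tp = cm.ravel()
    return {
        "accuracy": (tp + tn) / cm.sum(),
        "positive_rate": (tp + fp) / cm.sum(),         # demographic parity
        "TPR": tp / (tp + fn) if (tp + fn) else None,  # equalized opportunity
        "FNR": fn / (fn + tp) if (fn + tp) else None,  # equalized odds, with TPR
    }

# Toy usage for one marital-status subset:
print(group_metrics([0, 1, 1, 0, 1], [0, 1, 0, 0, 1]))
```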
task_type: completion_python
code_task: Credit_Scoring_Fairness
start_line: 471
end_line: 472
before:
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] 
+confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = 
(test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', 
"print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', 
"x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()']
['linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)']
['# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'linear_regression_model' used at line 471 is defined at line 470 and has a Short-Range dependency. Variable 'x_train' used at line 471 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 471 is defined at line 39 and has a Long-Range dependency. Variable 'linear_regression_model' used at line 472 is defined at line 470 and has a Short-Range dependency. Variable 'x_test' used at line 472 is defined at line 39 and has a Long-Range dependency.
{}
{'Variable Short-Range': 2, 'Variable Long-Range': 3}
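The horizon_categories_output field above distinguishes Short-Range from Long-Range variable dependencies purely by how far the use line sits from the definition line (471 vs. 470 is Short-Range; 471 vs. 39 is Long-Range). As a rough illustration, a distance-based labeler might look like the sketch below; the 10-line cutoff and the function name are my assumptions for illustration, since the records only show one very small gap and one very large gap, not the actual boundary.

# Hypothetical sketch: bucket a use/def line pair into the range labels seen above.
# The cutoff of 10 lines is an assumed value, not taken from the dataset.
def classify_dependency(use_line, def_line, cutoff=10):
    distance = abs(use_line - def_line)
    return 'Variable Short-Range' if distance <= cutoff else 'Variable Long-Range'

# Reproduces this record's labels:
assert classify_dependency(471, 470) == 'Variable Short-Range'  # distance 1
assert classify_dependency(471, 39) == 'Variable Long-Range'    # distance 432

Any cutoff between 2 and 431 would reproduce these particular labels, so the exact boundary cannot be recovered from this record alone.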
completion_python
Credit_Scoring_Fairness
472
472
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] 
+confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = 
(test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', 
"print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', 
"x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)']
['prediction = linear_regression_model.predict(x_test)']
['# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'linear_regression_model' used at line 472 is defined at line 470 and has a Short-Range dependency. Variable 'x_test' used at line 472 is defined at line 39 and has a Long-Range dependency.
{}
{'Variable Short-Range': 1, 'Variable Long-Range': 1}
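In both records above, the completion target fits a plain linear_model.LinearRegression() and the surrounding 'after' snippet then compares the continuous predictions directly to the binary biY labels (test_df['pred'] == test_df['biY']), so the 'accurate' column would come out False for almost every row. A minimal, self-contained sketch of the usual remedy, thresholding the regression output before comparing, is shown below; the 0.5 cutoff and the toy arrays are assumptions for illustration, not part of the dataset's code.

# Hypothetical sketch: binarize LinearRegression output before scoring.
# The 0.5 threshold and the toy data are illustrative assumptions.
import numpy as np
from sklearn.linear_model import LinearRegression

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
model = LinearRegression().fit(X, y)
binary_pred = (model.predict(X) >= 0.5).astype(int)  # floats -> 0/1
print((binary_pred == y).mean())  # fraction of matching labels; prints 1.0 here

With the raw floats, an equality test against 0/1 labels would match only where the regression happens to output exactly 0.0 or 1.0.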
completion_python
GAN_model
25
25
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):']
[' return tf.keras.activations.swish(x)']
['', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' 
"strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', 
"print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' 
filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', 
' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = 
downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 25 is imported at line 16 and has a Short-Range dependency. Variable 'x' used at line 25 is defined at line 24 and has a Short-Range dependency.
{}
{'Library Short-Range': 1, 'Variable Short-Range': 1}
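The horizon label in the analysis above comes from the gap between the line where a name is used and the line where it was defined or imported: a 9-line gap (line 25 back to line 16) is tagged Short-Range, while the next record tags a 12-line gap as Medium-Range. A minimal sketch of that classification, assuming cutoffs of 10 and 30 lines, which are inferred from these rows rather than taken from any spec:

def horizon_label(use_line: int, def_line: int) -> str:
    # Cutoffs are assumptions inferred from the records in this dump,
    # not an official definition: a gap of 9 is labelled Short-Range
    # here and a gap of 12 Medium-Range.
    gap = abs(use_line - def_line)
    if gap <= 10:
        return 'Short-Range'
    if gap <= 30:
        return 'Medium-Range'
    return 'Long-Range'

print(horizon_label(25, 16))  # Short-Range, matches the record above
print(horizon_label(28, 16))  # Medium-Range, matches the record below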
completion_python
GAN_model
28
28
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):']
[' return tf.keras.activations.relu(x)']
['', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": 
self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", 
"print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = 
layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' 
kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = 
tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 28 is imported at line 16 and has a Medium-Range dependency. Variable 'x' used at line 28 is defined at line 27 and has a Short-Range dependency.
{}
{'Library Medium-Range': 1, 'Variable Short-Range': 1}
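The frequency dict above is a tally of the '<kind> <horizon>' pairs named in the dependency sentences two lines up. A minimal sketch of that aggregation, assuming every sentence follows the "X '<name>' used at line ... has a <Horizon> dependency." pattern these records use (only Library and Variable kinds appear in this file; any other kinds would need adding to the pattern):

import re
from collections import Counter

def horizon_freq(analysis: str) -> dict:
    # Extract pairs like ('Library', 'Medium-Range') from each sentence.
    pairs = re.findall(r"(Library|Variable)[^.]*?has a (\w+-Range) dependency",
                       analysis)
    return dict(Counter(f"{kind} {horizon}" for kind, horizon in pairs))

text = ("Library 'tf' used at line 28 is imported at line 16 and has a "
        "Medium-Range dependency. Variable 'x' used at line 28 is defined "
        "at line 27 and has a Short-Range dependency.")
print(horizon_freq(text))  # {'Library Medium-Range': 1, 'Variable Short-Range': 1}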
completion_python
GAN_model
31
31
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):']
[' return tf.keras.layers.LeakyReLU(.2)(x)']
['', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": 
self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = 
EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, 
group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x 
= top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and 
print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 31 is imported at line 16 and has a Medium-Range dependency. Variable 'x' used at line 31 is defined at line 30 and has a Short-Range dependency.
{}
{'Library Medium-Range': 1, 'Variable Short-Range': 1}
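For orientation, here is a minimal sketch of the span this record annotates, reconstructed from the record's 'before' context (line numbers refer to the GAN_model source file; the snippet runs as-is if TensorFlow is installed):

import tensorflow as tf                        # source line 16: origin of the Medium-Range library dependency

def leakyrelu(x):                              # source line 30: binds 'x' (Short-Range dependency)
    return tf.keras.layers.LeakyReLU(.2)(x)   # source line 31: the completion target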
completion_python
GAN_model
37
37
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):']
[' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))']
['', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', 
' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", 
"print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' 
kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, 
avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'x' used at line 37 is defined at line 36 and has a Short-Range dependency. Library 'tf' used at line 37 is imported at line 16 and has a Medium-Range dependency.
{}
{'Variable Short-Range': 1, 'Library Medium-Range': 1}
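Likewise for this record, a minimal sketch of source lines 36-37 as stored in the 'before'/'between' fields. Note in passing that the literal 10e-8 evaluates to 1e-7, not the 1e-8 epsilon used by minibatch_stddev_layer elsewhere in the same file; the line is reproduced verbatim from the record rather than corrected:

import tensorflow as tf                        # source line 16 (Medium-Range dependency)

def pixelnorm(x):                              # source line 36: binds 'x' (Short-Range dependency)
    # source line 37: the completion target, verbatim from the record
    return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))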
completion_python
GAN_model
40
40
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):']
[' return layers.BatchNormalization(axis=-1)(x)']
['', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class 
EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", 
"print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' 
kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, 
avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 40 is imported at line 12 and has a Medium-Range dependency. Variable 'x' used at line 40 is defined at line 39 and has a Short-Range dependency.
{}
{'Library Medium-Range': 1, 'Variable Short-Range': 1}
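And for this record, source lines 39-40: the completion resolves 'layers' to the tensorflow.keras import at line 12 (Medium-Range) and 'x' to the enclosing def one line above (Short-Range):

from tensorflow.keras import layers              # source line 12 (Medium-Range dependency)

def batchnorm(x):                                # source line 39: binds 'x' (Short-Range dependency)
    return layers.BatchNormalization(axis=-1)(x) # source line 40: the completion target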
completion_python
GAN_model
67
69
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):']
[' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)']
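This record's 'between' field is a multi-line completion (source lines 67-69, per start_line/end_line): the body of upsample_d, a nearest-neighbour UpSampling2D applied to 'x'. A minimal runnable sketch follows; the def line number is inferred from start_line=67, and the usage example with its tensor shape is illustrative rather than taken from the record:

from tensorflow.keras import layers              # Medium-Range dependency for 'layers'
import tensorflow as tf

def upsample_d(x, factor=2):                     # source line 66 (inferred): binds 'x' and 'factor'
    # source lines 67-69: the completion target
    return layers.UpSampling2D(
        size=(factor, factor), interpolation='nearest'
    )(x)

# Illustrative usage: with factor=2, a (1, 4, 4, 512) map becomes (1, 8, 8, 512).
print(upsample_d(tf.zeros((1, 4, 4, 512))).shape)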
['', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": 
self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 
'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = 
act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 
'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, 
', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 67 is imported at line 12 and has a Long-Range dependency. Variable 'factor' used at line 68 is defined at line 66 and has a Short-Range dependency. Variable 'x' used at line 69 is defined at line 66 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2}
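The dependency annotations above point at the `upsample_d` helper (source lines 66-69), which appears verbatim in this record's context lists. The following is a minimal, self-contained sketch, assuming only that TensorFlow 2.x is installed, reconstructing the annotated span with its two horizons marked in comments: `layers` is the single Library Long-Range dependency (imported near the top of the target file, far from the use site), while `factor` and `x` are the two Variable Short-Range dependencies (bound in the signature on the line directly above their use).

import tensorflow as tf
from tensorflow.keras import layers  # Library Long-Range: import site sits far above the call site

def upsample_d(x, factor=2):
    # Variable Short-Range: `factor` and `x` are bound one line up, in the signature.
    return layers.UpSampling2D(
        size=(factor, factor), interpolation='nearest'
    )(x)

# Quick check: nearest-neighbour upsampling doubles the spatial dims, keeps channels.
y = upsample_d(tf.random.normal((1, 4, 4, 512)))
print(y.shape)  # expected (1, 8, 8, 512)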
completion_python
GAN_model
72
73
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):']
[' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)']
['', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, 
inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = 
tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return 
model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' 
kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = 
layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 72 is imported at line 12 and has a Long-Range dependency. Variable 'filters' used at line 72 is defined at line 71 and has a Short-Range dependency. Variable 'kernel_size' used at line 72 is defined at line 71 and has a Short-Range dependency. Variable 'factor' used at line 73 is defined at line 71 and has a Short-Range dependency. Variable 'padding' used at line 73 is defined at line 71 and has a Short-Range dependency. Variable 'x' used at line 73 is defined at line 71 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 5}
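This record's ground-truth completion (the two-element `between` list above, source lines 72-73) is the body of the `upsample` helper. Per the annotations, it carries one Library Long-Range dependency (`layers`) and five Variable Short-Range dependencies (`filters`, `kernel_size`, `factor`, `padding`, and `x`, all bound in the signature one line earlier). A runnable sketch of the completed function, again assuming only TensorFlow 2.x:

import tensorflow as tf
from tensorflow.keras import layers  # Library Long-Range dependency

def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):
    # Variable Short-Range: all five names below come from the signature above.
    return layers.Conv2DTranspose(filters, kernel_size,
                                  strides=(factor, factor), padding=padding)(x)

# Quick check: a transposed convolution with stride 2 and 'same' padding
# doubles the spatial dims and maps the channel count to `filters`.
y = upsample(tf.random.normal((1, 4, 4, 512)), filters=16)
print(y.shape)  # expected (1, 8, 8, 16)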
completion_python
GAN_model
76
76
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):']
[" return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)"]
['', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = 
(inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 
'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 
'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = 
(4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' 
kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 76 is imported at line 12 and has a Long-Range dependency. Variable 'factor' used at line 76 is defined at line 75 and has a Short-Range dependency. Variable 'x' used at line 76 is defined at line 75 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2}
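Here the completion is the single-line body of `avgpooling2D` (source line 76, the `between` list above): one Library Long-Range dependency (`layers`) plus two Variable Short-Range dependencies (`factor` and `x`, from the signature on line 75). A minimal sketch under the same TensorFlow 2.x assumption:

import tensorflow as tf
from tensorflow.keras import layers  # Library Long-Range dependency

def avgpooling2D(x, factor=2):
    # Variable Short-Range: `factor` and `x` are defined on the previous line.
    return layers.AveragePooling2D(pool_size=(2, 2),
                                   strides=(factor, factor), padding='same')(x)

# Quick check: 2x2 average pooling with stride 2 halves the spatial dims.
y = avgpooling2D(tf.random.normal((1, 8, 8, 512)))
print(y.shape)  # expected (1, 4, 4, 512)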
completion_python
GAN_model
107
121
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ']
[' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)']
[' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = 
tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 
'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' 
filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 107 is defined at line 102 and has a Short-Range dependency.
Variable 'self' used at line 109 is defined at line 102 and has a Short-Range dependency.
Variable 'kernel_size' used at line 109 is defined at line 96 and has a Medium-Range dependency.
Variable 'n_channels' used at line 110 is defined at line 103 and has a Short-Range dependency.
Variable 'self' used at line 111 is defined at line 102 and has a Short-Range dependency.
Variable 'filters' used at line 111 is defined at line 95 and has a Medium-Range dependency.
Variable 'self' used at line 112 is defined at line 102 and has a Short-Range dependency.
Variable 'kernel_initializer' used at line 112 is defined at line 98 and has a Medium-Range dependency.
Library 'tf' used at line 114 is imported at line 16 and has a Long-Range dependency.
Variable 'self' used at line 116 is defined at line 102 and has a Medium-Range dependency.
Variable 'self' used at line 118 is defined at line 102 and has a Medium-Range dependency.
Variable 'filters' used at line 118 is defined at line 95 and has a Medium-Range dependency.
Variable 'self' used at line 119 is defined at line 102 and has a Medium-Range dependency.
Variable 'bias_initializer' used at line 119 is defined at line 99 and has a Medium-Range dependency.
Library 'tf' used at line 121 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Short-Range': 5, 'Variable Medium-Range': 8, 'Library Long-Range': 2}
completion_python
GAN_model
125
133
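The Short-, Medium- and Long-Range labels in these fields track the distance between a symbol's use line and its definition line: in every example listed above, gaps of at most 10 lines are labelled Short-Range, gaps of 11-30 lines Medium-Range, and wider gaps (including module imports) Long-Range. A minimal Python sketch under that assumption — the exact thresholds are inferred from the rows shown here, not documented anywhere in the dataset:

    def dependency_range(use_line, def_line):
        # Thresholds inferred from the labelled examples above; an assumption,
        # not a documented property of the dataset.
        distance = use_line - def_line
        if distance <= 10:
            return 'Short-Range'
        if distance <= 30:
            return 'Medium-Range'
        return 'Long-Range'

    print(dependency_range(107, 102))  # Short-Range, matches the first sentence above
    print(dependency_range(118, 102))  # Medium-Range
    print(dependency_range(114, 16))   # Long-Range, the 'tf' import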
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()']
[' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config']
[' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, 
kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = 
EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center 
== None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 
'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'config' used at line 125 is defined at line 124 and has a Short-Range dependency.
Variable 'self' used at line 126 is defined at line 123 and has a Short-Range dependency.
Variable 'filters' used at line 126 is defined at line 95 and has a Long-Range dependency.
Variable 'self' used at line 127 is defined at line 123 and has a Short-Range dependency.
Variable 'kernel_size' used at line 127 is defined at line 96 and has a Long-Range dependency.
Variable 'self' used at line 128 is defined at line 123 and has a Short-Range dependency.
Variable 'strides' used at line 128 is defined at line 97 and has a Long-Range dependency.
Variable 'self' used at line 129 is defined at line 123 and has a Short-Range dependency.
Variable 'kernel_initializer' used at line 129 is defined at line 98 and has a Long-Range dependency.
Variable 'self' used at line 130 is defined at line 123 and has a Short-Range dependency.
Variable 'bias_initializer' used at line 130 is defined at line 99 and has a Long-Range dependency.
Variable 'self' used at line 131 is defined at line 123 and has a Short-Range dependency.
Variable 'gain' used at line 131 is defined at line 100 and has a Long-Range dependency.
Variable 'config' used at line 133 is defined at line 124 and has a Short-Range dependency.
{}
{'Variable Short-Range': 8, 'Variable Long-Range': 6}
completion_python
GAN_model
136
140
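Each horizon_freq_analysis dict is a tally of the '<kind> <range>' pairs appearing in the matching horizon_categories_output text. A rough reconstruction in Python — the regex assumes the fixed sentence template seen above and covers only the 'Variable' and 'Library' kinds that occur in these rows:

    import re
    from collections import Counter

    def horizon_freq(text):
        # Pair each dependency kind with the range named later in the same sentence.
        pairs = re.findall(r"(Variable|Library)[^.]*?(Short|Medium|Long)-Range", text)
        return dict(Counter(f"{kind} {rng}-Range" for kind, rng in pairs))

    sample = ("Variable 'config' used at line 125 is defined at line 124 "
              "and has a Short-Range dependency.")
    print(horizon_freq(sample))  # {'Variable Short-Range': 1}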
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):']
[" x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x']
['', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", 
"print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' 
kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, 
avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 136 is imported at line 16 and has a Long-Range dependency. Variable 'inputs' used at line 136 is defined at line 135 and has a Short-Range dependency. Variable 'self' used at line 136 is defined at line 135 and has a Short-Range dependency. Variable 'scale' used at line 136 is defined at line 105 and has a Long-Range dependency. Variable 'w' used at line 136 is defined at line 107 and has a Medium-Range dependency. Variable 'strides' used at line 136 is defined at line 97 and has a Long-Range dependency. Variable 'self' used at line 138 is defined at line 135 and has a Short-Range dependency. Variable 'b' used at line 138 is defined at line 116 and has a Medium-Range dependency. Variable 'x' used at line 138 is defined at line 136 and has a Short-Range dependency. Variable 'x' used at line 140 is defined at line 138 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 5, 'Variable Long-Range': 2, 'Variable Medium-Range': 2}
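Each record in this dump appears to close with two summary dictionaries: one (often empty, as in the `{}` above) for a separate annotation family, and a frequency tally of the dependency sentences by category and range. Below is a minimal sketch of how that tally could be recomputed from the annotation text; the regex is an assumption inferred from the sentence pattern in this dump, not a documented schema:

    import re
    from collections import Counter

    def horizon_freq(annotations: str) -> dict:
        # Sentences follow the pattern:
        #   "Variable 'b' used at line 138 is defined at line 116 and has a Medium-Range dependency."
        # Capture the leading category (Library/Variable/Class/...) and the range label.
        pattern = re.compile(r"(\w+) '[^']+' used at line \d+ .*? has a (\w+-Range) dependency")
        return dict(Counter(f"{kind} {rng}" for kind, rng in pattern.findall(annotations)))

    text = ("Library 'tf' used at line 136 is imported at line 16 and has a Long-Range dependency. "
            "Variable 'w' used at line 136 is defined at line 107 and has a Medium-Range dependency.")
    print(horizon_freq(text))  # {'Library Long-Range': 1, 'Variable Medium-Range': 1}

Run over all ten annotation sentences above, this reproduces the counts in the closing dictionary exactly.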
completion_python
GAN_model
138
140
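The two integers above (138 and 140) read as the 1-based, inclusive start and end lines of the span the model must complete: the long listing that follows is the context before that span, the short list after it is the ground-truth span itself, and the rest of the file follows. A hedged sketch of the slicing this layout implies (the function name is hypothetical):

    def split_for_completion(source: str, start_line: int, end_line: int):
        # Split a file into (before, between, after) around a 1-based inclusive line span.
        lines = source.splitlines()
        before = lines[:start_line - 1]           # context above the target span
        between = lines[start_line - 1:end_line]  # the ground-truth completion
        after = lines[end_line:]                  # context below the span
        return before, between, after

    demo = "a = 1\nb = 2\nc = a + b\nprint(c)"
    _, between, _ = split_for_completion(demo, 2, 3)
    print(between)  # ['b = 2', 'c = a + b']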
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '']
[' x = x + self.b', '', ' return x']
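Joined to the tail of the preceding context, which ends inside EqualizedConv2D.call right after the tf.nn.conv2d line, this three-line ground truth finishes the method: the unscaled bias is added and the result returned. Reassembled verbatim from the record for readability (shown out of its class; self.scale, self.w and self.b are created in build()):

    def call(self, inputs, training=None):
        # convolve with the dynamically scaled kernel; the bias is added unscaled
        x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding='SAME')
        x = x + self.b
        return x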
['', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", 
"print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' 
kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, 
avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 138 is defined at line 135 and has a Short-Range dependency. Variable 'b' used at line 138 is defined at line 116 and has a Medium-Range dependency. Variable 'x' used at line 138 is defined at line 136 and has a Short-Range dependency. Variable 'x' used at line 140 is defined at line 138 and has a Short-Range dependency.
{}
{'Variable Short-Range': 3, 'Variable Medium-Range': 1}
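Comparing use/definition line pairs with their labels across these records suggests the range buckets are a fixed function of line distance: roughly Short up to 10 lines, Medium 11 to 30, and Long beyond that. A sketch using those inferred (not documented) thresholds:

    def dependency_range(use_line: int, def_line: int) -> str:
        # Thresholds inferred from the annotated examples in this dump; not an official spec.
        distance = use_line - def_line
        if distance <= 10:
            return "Short-Range"
        if distance <= 30:
            return "Medium-Range"
        return "Long-Range"

    # Spot-checks against the annotations above:
    print(dependency_range(138, 135))  # Short-Range  (distance 3)
    print(dependency_range(138, 116))  # Medium-Range (distance 22)
    print(dependency_range(136, 105))  # Long-Range   (distance 31)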
completion_python
GAN_model
152
158
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ']
[' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain']
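This record targets lines 152-158: the body of EqualizedConv2DTranspose.__init__. Joined with the signature at the end of the preceding context, the method reconstructs as follows (verbatim from the record's fields):

    def __init__(self, filters, kernel_size=(3,3), strides=(1,1),
                 kernel_initializer=tf.initializers.RandomNormal(seed=42),
                 bias_initializer=tf.initializers.Zeros(), gain=2, **kwargs):
        super(EqualizedConv2DTranspose, self).__init__(**kwargs)
        # store constructor arguments; the equalized scaling itself is computed later in build()
        self.filters = filters
        self.kernel_size = kernel_size
        self.strides = strides
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.gain = gain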
['', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' 
strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', 
'#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 
'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Class 'EqualizedConv2DTranspose' used at line 152 is defined at line 142 and has a Short-Range dependency. Variable 'self' used at line 152 is defined at line 143 and has a Short-Range dependency. Variable 'self' used at line 153 is defined at line 143 and has a Short-Range dependency. Variable 'filters' used at line 153 is defined at line 144 and has a Short-Range dependency. Variable 'self' used at line 154 is defined at line 143 and has a Medium-Range dependency. Variable 'kernel_size' used at line 154 is defined at line 145 and has a Short-Range dependency. Variable 'self' used at line 155 is defined at line 143 and has a Medium-Range dependency. Variable 'strides' used at line 155 is defined at line 146 and has a Short-Range dependency. Variable 'self' used at line 156 is defined at line 143 and has a Medium-Range dependency. Variable 'kernel_initializer' used at line 156 is defined at line 147 and has a Short-Range dependency. Variable 'self' used at line 157 is defined at line 143 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 157 is defined at line 148 and has a Short-Range dependency. Variable 'self' used at line 158 is defined at line 143 and has a Medium-Range dependency. Variable 'gain' used at line 158 is defined at line 149 and has a Short-Range dependency.
{}
{'Class Short-Range': 1, 'Variable Short-Range': 8, 'Variable Medium-Range': 5}
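Note that the record that follows targets lines 154-158 of the same GAN_model file, a strict sub-span of the 152-158 span just completed, so the dump contains nested targets over identical context. A one-line containment check (helper name hypothetical):

    def spans_nested(outer, inner):
        # True if the inner (start, end) span lies inside the outer one (1-based, inclusive).
        return outer[0] <= inner[0] and inner[1] <= outer[1]

    print(spans_nested((152, 158), (154, 158)))  # True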
completion_python
GAN_model
154
158
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters']
[' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain']
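Here the ground truth starts two lines later (line 154), so the super().__init__ call and the self.filters assignment already close the preceding context, leaving only the five remaining attribute assignments to predict. One consistency check the layout implies is that the ground-truth list length equals the inclusive span size; a sketch, not shipped tooling:

    def check_span(between: list, start_line: int, end_line: int) -> bool:
        # The ground-truth list should hold exactly the lines of the inclusive span.
        return len(between) == end_line - start_line + 1

    between = ['self.kernel_size = kernel_size', 'self.strides = strides',
               'self.kernel_initializer = kernel_initializer',
               'self.bias_initializer = bias_initializer', 'self.gain = gain']
    print(check_span(between, 154, 158))  # True: five lines for lines 154-158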
['', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' 
strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', 
'#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 
'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 154 is defined at line 143 and has a Medium-Range dependency. Variable 'kernel_size' used at line 154 is defined at line 145 and has a Short-Range dependency. Variable 'self' used at line 155 is defined at line 143 and has a Medium-Range dependency. Variable 'strides' used at line 155 is defined at line 146 and has a Short-Range dependency. Variable 'self' used at line 156 is defined at line 143 and has a Medium-Range dependency. Variable 'kernel_initializer' used at line 156 is defined at line 147 and has a Short-Range dependency. Variable 'self' used at line 157 is defined at line 143 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 157 is defined at line 148 and has a Short-Range dependency. Variable 'self' used at line 158 is defined at line 143 and has a Medium-Range dependency. Variable 'gain' used at line 158 is defined at line 149 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 5, 'Variable Short-Range': 5}
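Note on the record above: its target span simply stores the constructor arguments (filters, kernel_size, strides, the two initializers, gain) on self, and every dependency the annotator found is a Variable reference back into __init__, which is why the frequency dict holds only Variable Short-Range and Variable Medium-Range counts. The one substantive idea the completed layer relies on is the equalized learning-rate scale sqrt(gain / fan_in). The sketch below is an illustration written for this note, not dataset content; the shapes are assumptions chosen to match the record's defaults (3x3 kernel, 512 input channels, gain=2).

import numpy as np

# Illustration only: the runtime weight scale the equalized layers apply.
def equalized_scale(kernel_size=(3, 3), n_channels=512, gain=2.0):
    kh, kw = kernel_size
    fan_in = kh * kw * n_channels      # inputs feeding each output unit
    return np.sqrt(gain / fan_in)      # He-style scale applied at call time

rng = np.random.default_rng(42)
w = rng.normal(size=(3, 3, 512, 16))   # raw N(0, 1) kernel, as initialized
w_eff = equalized_scale() * w          # effective weights the layer would use
print(w_eff.std())                     # ~0.0208 == sqrt(2 / (3*3*512))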
completion_python
GAN_model
165
179
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ']
[' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)']
[' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = 
act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 
'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 
'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 165 is defined at line 160 and has a Short-Range dependency. Variable 'self' used at line 167 is defined at line 160 and has a Short-Range dependency. Variable 'kernel_size' used at line 167 is defined at line 154 and has a Medium-Range dependency. Variable 'self' used at line 168 is defined at line 160 and has a Short-Range dependency. Variable 'filters' used at line 168 is defined at line 153 and has a Medium-Range dependency. Variable 'n_channels' used at line 169 is defined at line 161 and has a Short-Range dependency. Variable 'self' used at line 170 is defined at line 160 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 170 is defined at line 156 and has a Medium-Range dependency. Library 'tf' used at line 172 is imported at line 16 and has a Long-Range dependency. Variable 'self' used at line 174 is defined at line 160 and has a Medium-Range dependency. Variable 'self' used at line 176 is defined at line 160 and has a Medium-Range dependency. Variable 'filters' used at line 176 is defined at line 153 and has a Medium-Range dependency. Variable 'self' used at line 177 is defined at line 160 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 177 is defined at line 157 and has a Medium-Range dependency. Library 'tf' used at line 179 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Short-Range': 5, 'Variable Medium-Range': 8, 'Library Long-Range': 2}
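The second record's ground-truth span (its between field) is the pair of add_weight calls that register the transposed-convolution kernel, laid out as (kh, kw, out_channels, in_channels), plus the bias. One detail worth flagging for anyone re-running this context: in the surrounding after field, tf.nn.conv2d_transpose receives a 2-tuple output_shape, whereas TF2's documented signature expects the full NHWC shape, so the context code as written would fail at call time. Below is a hedged sketch of a call consistent with that kernel layout; the helper name and arguments are inventions for this note, assuming TF2 semantics and static spatial dimensions.

import tensorflow as tf

# Hypothetical helper (assumption, not the dataset's code): a
# conv2d_transpose call matching the record's (kh, kw, out, in) kernel.
def transpose_conv(inputs, w, b, scale, strides=(4, 4)):
    batch = tf.shape(inputs)[0]
    out_shape = tf.stack([batch,
                          inputs.shape[1] * strides[0],   # assumes static H
                          inputs.shape[2] * strides[1],   # assumes static W
                          tf.shape(w)[2]])                # out_channels at index 2
    x = tf.nn.conv2d_transpose(inputs, scale * w, out_shape,
                               strides=list(strides), padding='SAME')
    return x + b   # bias added unscaled, as in the records' layers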
completion_python
GAN_model
161
179
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):']
[' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)']
[' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = 
act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 
'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 
'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'input_shape' used at line 161 is defined at line 160 and has a Short-Range dependency. Variable 'self' used at line 162 is defined at line 160 and has a Short-Range dependency. Variable 'n_channels' used at line 162 is defined at line 161 and has a Short-Range dependency. Variable 'kernel_size' used at line 162 is defined at line 154 and has a Short-Range dependency. Variable 'self' used at line 163 is defined at line 160 and has a Short-Range dependency. Library 'tf' used at line 163 is imported at line 16 and has a Long-Range dependency. Variable 'fan_in' used at line 163 is defined at line 162 and has a Short-Range dependency. Variable 'gain' used at line 163 is defined at line 158 and has a Short-Range dependency. Variable 'self' used at line 165 is defined at line 160 and has a Short-Range dependency. Variable 'self' used at line 167 is defined at line 160 and has a Short-Range dependency. Variable 'kernel_size' used at line 167 is defined at line 154 and has a Medium-Range dependency. Variable 'self' used at line 168 is defined at line 160 and has a Short-Range dependency. Variable 'filters' used at line 168 is defined at line 153 and has a Medium-Range dependency. Variable 'n_channels' used at line 169 is defined at line 161 and has a Short-Range dependency. Variable 'self' used at line 170 is defined at line 160 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 170 is defined at line 156 and has a Medium-Range dependency. Library 'tf' used at line 172 is imported at line 16 and has a Long-Range dependency. Variable 'self' used at line 174 is defined at line 160 and has a Medium-Range dependency. Variable 'self' used at line 176 is defined at line 160 and has a Medium-Range dependency. Variable 'filters' used at line 176 is defined at line 153 and has a Medium-Range dependency. Variable 'self' used at line 177 is defined at line 160 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 177 is defined at line 157 and has a Medium-Range dependency. Library 'tf' used at line 179 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Short-Range': 12, 'Library Long-Range': 3, 'Variable Medium-Range': 8}
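The third record repeats the same completion span with an earlier start line (161), so its annotation adds the fan_in/scale computation's dependencies on top of the weight-creation ones, and the tallies grow accordingly. Each record's horizon_freq_analysis dict appears to be a simple tally of the one-sentence-per-dependency annotation string; the sketch below shows one way those tallies could be reproduced. The regex, function name, and category vocabulary are guesses made for this note, not the dataset's actual tooling.

import re
from collections import Counter

# Assumed reconstruction of the horizon_freq_analysis tally (regex is a guess).
def freq_analysis(annotation: str) -> dict:
    pairs = re.findall(
        r"(Variable|Library|Class|Function) '[^']*' used at line \d+ "
        r"is (?:defined|imported) at line \d+ and has a "
        r"(Short|Medium|Long)-Range dependency",
        annotation)
    return dict(Counter(f"{kind} {dist}-Range" for kind, dist in pairs))

demo = ("Variable 'self' used at line 165 is defined at line 160 and has a "
        "Short-Range dependency. Library 'tf' used at line 172 is imported "
        "at line 16 and has a Long-Range dependency.")
print(freq_analysis(demo))  # {'Variable Short-Range': 1, 'Library Long-Range': 1}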
completion_python
GAN_model
187
191
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,']
[' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config']
['', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 
'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = 
tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def 
final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = 
(image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 187 is defined at line 181 and has a Short-Range dependency.
Variable 'kernel_initializer' used at line 187 is defined at line 156 and has a Long-Range dependency.
Variable 'self' used at line 188 is defined at line 181 and has a Short-Range dependency.
Variable 'bias_initializer' used at line 188 is defined at line 157 and has a Long-Range dependency.
Variable 'self' used at line 189 is defined at line 181 and has a Short-Range dependency.
Variable 'gain' used at line 189 is defined at line 158 and has a Long-Range dependency.
Variable 'config' used at line 191 is defined at line 182 and has a Short-Range dependency.
{}
{'Variable Short-Range': 4, 'Variable Long-Range': 3}
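The Short/Medium/Long labels track the line distance between a symbol's use and its definition. The sketch below is a classifier consistent with every pair in these records; the cutoffs 10 and 30 are assumptions inferred from the data (a gap of 6 is labelled Short, 25 Medium, 31 Long), not documented thresholds.

def horizon_label(use_line: int, def_line: int) -> str:
    # Classify a use/definition pair by its line gap (inferred cutoffs).
    gap = use_line - def_line
    if gap <= 10:
        return "Short-Range"
    if gap <= 30:
        return "Medium-Range"
    return "Long-Range"

assert horizon_label(187, 181) == "Short-Range"   # 'self': gap 6
assert horizon_label(199, 174) == "Medium-Range"  # 'b': gap 25
assert horizon_label(187, 156) == "Long-Range"    # 'kernel_initializer': gap 31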
completion_python
GAN_model
199
201
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '']
[' x = x + self.b', '', ' return x']
['', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' 
input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', 
' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 199 is defined at line 193 and has a Short-Range dependency.
Variable 'b' used at line 199 is defined at line 174 and has a Medium-Range dependency.
Variable 'x' used at line 199 is defined at line 194 and has a Short-Range dependency.
Variable 'x' used at line 201 is defined at line 199 and has a Short-Range dependency.
{}
{'Variable Short-Range': 3, 'Variable Medium-Range': 1}
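Each record splits the full source file around the target span: 'before' ends just before start_line, the short 'between' list (here lines 199-201, the bias add and return of EqualizedConv2DTranspose.call) is the ground-truth completion, and 'after' resumes after end_line. Below is a minimal sketch of that framing with toy stand-ins for the fields and illustrative indentation; reassemble is a hypothetical helper, not part of the dataset.

# Toy stand-ins; real records hold the whole file as lists of source lines.
before  = ["    def call(self, inputs, training=None):",
           "        x = tf.nn.conv2d_transpose(inputs, ...)"]
between = ["        x = x + self.b", "", "        return x"]  # target, lines 199-201
after   = ["", "class EqualizedDense(layers.Layer):"]

def reassemble(before, between, after):
    # Recombine the three context fields into one source string; the model
    # is given before + after and must generate the between span.
    return "\n".join(before + between + after)

print(reassemble(before, between, after))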
completion_python
GAN_model
194
201
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):']
[' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x']
['', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' 
input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', 
' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 194 is imported at line 16 and has a Long-Range dependency. Variable 'inputs' used at line 194 is defined at line 193 and has a Short-Range dependency. Variable 'self' used at line 194 is defined at line 193 and has a Short-Range dependency. Variable 'scale' used at line 194 is defined at line 163 and has a Long-Range dependency. Variable 'w' used at line 194 is defined at line 165 and has a Medium-Range dependency. Variable 'inputs' used at line 195 is defined at line 193 and has a Short-Range dependency. Variable 'self' used at line 195 is defined at line 193 and has a Short-Range dependency. Variable 'strides' used at line 195 is defined at line 155 and has a Long-Range dependency. Variable 'self' used at line 196 is defined at line 193 and has a Short-Range dependency. Variable 'strides' used at line 196 is defined at line 155 and has a Long-Range dependency. Variable 'self' used at line 199 is defined at line 193 and has a Short-Range dependency. Variable 'b' used at line 199 is defined at line 174 and has a Medium-Range dependency. Variable 'x' used at line 199 is defined at line 194 and has a Short-Range dependency. Variable 'x' used at line 201 is defined at line 199 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 8, 'Variable Long-Range': 3, 'Variable Medium-Range': 2}
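The Short/Medium/Long labels in the horizon annotations above track the line distance between a symbol's use and its definition. Below is a minimal sketch of that bucketing, assuming cutoffs inferred from this record's own examples (at most 10 lines apart for Short-Range, at most 30 for Medium-Range, farther for Long-Range); the function name and thresholds are illustrative guesses, not taken from the dataset's tooling.

def classify_dependency(use_line: int, def_line: int) -> str:
    # Bucket a def-use pair by how many lines apart use and definition sit.
    distance = use_line - def_line
    if distance <= 10:       # e.g. 'inputs': used at 194, defined at 193
        return "Short-Range"
    if distance <= 30:       # e.g. 'w': used at 194, defined at 165
        return "Medium-Range"
    return "Long-Range"      # e.g. 'scale': used at 194, defined at 163

# Spot-check against the annotations in the record above:
assert classify_dependency(194, 193) == "Short-Range"
assert classify_dependency(194, 165) == "Medium-Range"
assert classify_dependency(194, 163) == "Long-Range"

The same cutoffs are consistent with every pair in the records that follow (a distance of 9 is labeled Short-Range while 11 is Medium-Range), which is what motivates the 10/30 guess.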
completion_python
GAN_model
213
216
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '']
[' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain']
[' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' 
kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), 
filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = 
final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 
'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 213 is defined at line 204 and has a Short-Range dependency. Variable 'units' used at line 213 is defined at line 205 and has a Short-Range dependency. Variable 'self' used at line 214 is defined at line 204 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 214 is defined at line 206 and has a Short-Range dependency. Variable 'self' used at line 215 is defined at line 204 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 215 is defined at line 207 and has a Short-Range dependency. Variable 'self' used at line 216 is defined at line 204 and has a Medium-Range dependency. Variable 'gain' used at line 216 is defined at line 208 and has a Short-Range dependency.
{}
{'Variable Short-Range': 6, 'Variable Medium-Range': 2}
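The horizon_freq_analysis dict on the line above is a tally of the '<Kind> <Range>-Range' pairs named in the horizon_categories_output sentences. A minimal sketch of that aggregation, assuming the sentences always follow the fixed template seen in these records; the regex and the helper name are assumptions for illustration, not dataset code.

import re
from collections import Counter

PATTERN = re.compile(
    r"(Library|Variable) '\w+' used at line \d+ is "
    r"(?:defined|imported) at line \d+ and has a "
    r"(Short|Medium|Long)-Range dependency\."
)

def horizon_freq(annotation: str) -> dict:
    # Count one '<Kind> <Range>-Range' entry per matched sentence.
    return dict(Counter(f"{kind} {rng}-Range"
                        for kind, rng in PATTERN.findall(annotation)))

Run over the full annotation string of this record, it reproduces {'Variable Short-Range': 6, 'Variable Medium-Range': 2} exactly.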
completion_python
GAN_model
221
238
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ']
[' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)']
[' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in 
U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = 
U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = 
final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = 
from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'input_shape' used at line 221 is defined at line 219 and has a Short-Range dependency. Variable 'self' used at line 223 is defined at line 219 and has a Short-Range dependency. Library 'tf' used at line 223 is imported at line 16 and has a Long-Range dependency. Variable 'n_channels' used at line 223 is defined at line 221 and has a Short-Range dependency. Variable 'gain' used at line 223 is defined at line 216 and has a Short-Range dependency. Variable 'self' used at line 225 is defined at line 219 and has a Short-Range dependency. Variable 'n_channels' used at line 227 is defined at line 221 and has a Short-Range dependency. Variable 'self' used at line 228 is defined at line 219 and has a Short-Range dependency. Variable 'units' used at line 228 is defined at line 213 and has a Medium-Range dependency. Variable 'self' used at line 229 is defined at line 219 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 229 is defined at line 206 and has a Medium-Range dependency. Library 'tf' used at line 231 is imported at line 16 and has a Long-Range dependency. Variable 'self' used at line 233 is defined at line 219 and has a Medium-Range dependency. Variable 'self' used at line 235 is defined at line 219 and has a Medium-Range dependency. Variable 'units' used at line 235 is defined at line 213 and has a Medium-Range dependency. Variable 'self' used at line 236 is defined at line 219 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 236 is defined at line 207 and has a Medium-Range dependency. Library 'tf' used at line 238 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Short-Range': 8, 'Library Long-Range': 3, 'Variable Medium-Range': 7}
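Across these records, before, between, and after appear to partition one source file: concatenating them recovers the file, with between being the ground-truth completion occupying the 1-indexed span [start_line, end_line]. A minimal sketch of that invariant follows, under those assumptions; the field relationships are inferred from the records here, not stated anywhere in the dump.

def reassemble(before: list, between: list, after: list) -> list:
    # Recover the full source file as a list of lines.
    return before + between + after

def check_span(before: list, between: list,
               start_line: int, end_line: int) -> None:
    # Assumed: the completion starts on the line right after `before` ends...
    assert start_line == len(before) + 1
    # ...and spans exactly end_line - start_line + 1 lines.
    assert len(between) == end_line - start_line + 1

The span-length check can be verified directly against the complete records above (start 213 / end 216 with a 4-line between; start 221 / end 238 with an 18-line between); the start-line relation is an assumption about how the fields were cut and is not verifiable from this excerpt.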
completion_python
GAN_model
225
238
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ']
[' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)']
[' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in 
U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = 
U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = 
final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = 
from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
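The `before`/`after` payloads of the record above revolve around equalized layers in the Progressive-GAN style: weights are drawn from a unit normal and rescaled at call time by sqrt(gain / fan_in), while the bias is left unscaled. A framework-free NumPy sketch of that rule follows; the function name and shapes are illustrative, not taken from the payload.

```python
import numpy as np

# Minimal sketch of the equalized learning-rate rule used by the
# EqualizedConv2D / EqualizedDense layers quoted in this record:
# an N(0, 1) kernel is rescaled on every forward pass by
# sqrt(gain / fan_in); the bias is not scaled.
rng = np.random.default_rng(42)

def equalized_dense(x: np.ndarray, units: int, gain: float = 2.0) -> np.ndarray:
    n_channels = x.shape[-1]
    scale = np.sqrt(gain / n_channels)            # runtime scale, not init-time
    w = rng.standard_normal((n_channels, units))  # unit-normal kernel
    b = np.zeros(units)                           # bias left unscaled
    return x @ (scale * w) + b

x = rng.standard_normal((1, 512))
print(equalized_dense(x, units=1).shape)  # (1, 1)
```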
Variable 'self' used at line 225 is defined at line 219 and has a Short-Range dependency.
Variable 'n_channels' used at line 227 is defined at line 221 and has a Short-Range dependency.
Variable 'self' used at line 228 is defined at line 219 and has a Short-Range dependency.
Variable 'units' used at line 228 is defined at line 213 and has a Medium-Range dependency.
Variable 'self' used at line 229 is defined at line 219 and has a Short-Range dependency.
Variable 'kernel_initializer' used at line 229 is defined at line 206 and has a Medium-Range dependency.
Library 'tf' used at line 231 is imported at line 16 and has a Long-Range dependency.
Variable 'self' used at line 233 is defined at line 219 and has a Medium-Range dependency.
Variable 'self' used at line 235 is defined at line 219 and has a Medium-Range dependency.
Variable 'units' used at line 235 is defined at line 213 and has a Medium-Range dependency.
Variable 'self' used at line 236 is defined at line 219 and has a Medium-Range dependency.
Variable 'bias_initializer' used at line 236 is defined at line 207 and has a Medium-Range dependency.
Library 'tf' used at line 238 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Short-Range': 4, 'Variable Medium-Range': 7, 'Library Long-Range': 2}
completion_python
GAN_model
233
238
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ']
[' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)']
[' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in 
U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = 
U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = 
final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = 
from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
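Both records quote the same `minibatch_stddev_layer` helper, which appends a feature map holding the average per-group standard deviation so the discriminator sees batch-level variation. The following is a simplified NumPy sketch of the statistic, assuming a single group over the whole batch and NHWC layout; the payload's version additionally handles grouping and its own shape bookkeeping.

```python
import numpy as np

# Sketch of the minibatch standard-deviation statistic from the payload's
# minibatch_stddev_layer (Progressive GAN, Karras et al.), simplified to a
# single group spanning the whole batch, NHWC layout.
def minibatch_stddev(x: np.ndarray) -> np.ndarray:
    y = x - x.mean(axis=0, keepdims=True)       # subtract per-position batch mean
    y = np.sqrt((y ** 2).mean(axis=0) + 1e-8)   # per-position stddev across batch
    stat = y.mean()                             # one scalar for the whole batch
    n, h, w, _ = x.shape
    extra = np.full((n, h, w, 1), stat, dtype=x.dtype)  # broadcast as a channel
    return np.concatenate([x, extra], axis=-1)

x = np.random.default_rng(0).standard_normal((4, 8, 8, 3))
print(minibatch_stddev(x).shape)  # (4, 8, 8, 4)
```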
Variable 'self' used at line 233 is defined at line 219 and has a Medium-Range dependency.
Variable 'self' used at line 235 is defined at line 219 and has a Medium-Range dependency.
Variable 'units' used at line 235 is defined at line 213 and has a Medium-Range dependency.
Variable 'self' used at line 236 is defined at line 219 and has a Medium-Range dependency.
Variable 'bias_initializer' used at line 236 is defined at line 207 and has a Medium-Range dependency.
Library 'tf' used at line 238 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Medium-Range': 5, 'Library Long-Range': 1}
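The frequency dictionary above is a plain category count over the sentence-level annotations in the same record. A sketch of how such a tally could be produced is shown below; the regex assumes the exact sentence template used in this file and is not an official dataset utility.

```python
import re
from collections import Counter

# Hypothetical aggregation: tally '<Kind> <Range>-Range' pairs out of the
# annotation sentences. The pattern assumes the sentence template seen in
# this file ("<Kind> '<name>' used at line N ... has a <Range>-Range ...").
def tally(annotations: str) -> dict:
    pattern = r"(Variable|Library|Class|Function) '.*?' .*? (Short|Medium|Long)-Range"
    pairs = re.findall(pattern, annotations)
    return dict(Counter(f"{kind} {rng}-Range" for kind, rng in pairs))

text = ("Variable 'self' used at line 233 is defined at line 219 and has a "
        "Medium-Range dependency. Library 'tf' used at line 238 is imported "
        "at line 16 and has a Long-Range dependency.")
print(tally(text))  # {'Variable Medium-Range': 1, 'Library Long-Range': 1}
```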
completion_python
GAN_model
241
248
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):']
[' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config']
[' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and 
shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 
'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = 
layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'config' used at line 242 is defined at line 241 and has a Short-Range dependency. Variable 'self' used at line 243 is defined at line 240 and has a Short-Range dependency. Variable 'units' used at line 243 is defined at line 213 and has a Medium-Range dependency. Variable 'self' used at line 244 is defined at line 240 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 244 is defined at line 214 and has a Medium-Range dependency. Variable 'self' used at line 245 is defined at line 240 and has a Short-Range dependency. Variable 'bias_initializer' used at line 245 is defined at line 215 and has a Medium-Range dependency. Variable 'self' used at line 246 is defined at line 240 and has a Short-Range dependency. Variable 'gain' used at line 246 is defined at line 216 and has a Medium-Range dependency. Variable 'config' used at line 248 is defined at line 241 and has a Short-Range dependency.
{}
{'Variable Short-Range': 6, 'Variable Medium-Range': 4}
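The horizon_categories_output sentences above pair each variable use with its definition line and a range label, and horizon_freq_analysis tallies those labels. Below is a minimal sketch (not part of the dataset pipeline) of how such a tally could be computed from the (use_line, def_line) pairs; the 10-line and 30-line cutoffs are an assumption inferred from the gaps in the examples above, not a documented spec.

# Hypothetical reconstruction of the horizon_freq_analysis tally.
# Thresholds (<=10 short, <=30 medium) are assumed from the examples.
from collections import Counter

def dependency_range(use_line, def_line, short=10, medium=30):
    gap = use_line - def_line
    if gap <= short:
        return 'Variable Short-Range'
    if gap <= medium:
        return 'Variable Medium-Range'
    return 'Variable Long-Range'

# (use_line, def_line) pairs copied from the sentences above.
pairs = [(242, 241), (243, 240), (243, 213), (244, 240), (244, 214),
         (245, 240), (245, 215), (246, 240), (246, 216), (248, 241)]
print(dict(Counter(dependency_range(u, d) for u, d in pairs)))
# -> {'Variable Short-Range': 6, 'Variable Medium-Range': 4}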
completion_python
GAN_model
242
248
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()']
[' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config']
[' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and 
shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 
'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = 
layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'config' used at line 242 is defined at line 241 and has a Short-Range dependency. Variable 'self' used at line 243 is defined at line 240 and has a Short-Range dependency. Variable 'units' used at line 243 is defined at line 213 and has a Medium-Range dependency. Variable 'self' used at line 244 is defined at line 240 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 244 is defined at line 214 and has a Medium-Range dependency. Variable 'self' used at line 245 is defined at line 240 and has a Short-Range dependency. Variable 'bias_initializer' used at line 245 is defined at line 215 and has a Medium-Range dependency. Variable 'self' used at line 246 is defined at line 240 and has a Short-Range dependency. Variable 'gain' used at line 246 is defined at line 216 and has a Medium-Range dependency. Variable 'config' used at line 248 is defined at line 241 and has a Short-Range dependency.
{}
{'Variable Short-Range': 6, 'Variable Medium-Range': 4}
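The between field of this record completes EqualizedDense.get_config, whose purpose is making the custom layer serializable. The following self-contained toy layer (hypothetical, and deliberately simpler than the EqualizedDense quoted in the records) shows the get_config()/from_config() round trip that the `return config` completion enables.

# Toy example of the get_config serialization pattern; ScaledDense is
# an illustrative stand-in, not the dataset's EqualizedDense.
import tensorflow as tf

class ScaledDense(tf.keras.layers.Layer):
    def __init__(self, units=1, gain=2, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.gain = gain

    def build(self, input_shape):
        n = int(input_shape[-1])
        # Runtime weight scale, as in the equalized layers above.
        self.scale = tf.math.sqrt(tf.cast(self.gain, tf.float32) / n)
        self.w = self.add_weight(name='kernel', shape=(n, self.units))
        self.b = self.add_weight(name='bias', shape=(self.units,),
                                 initializer='zeros')

    def call(self, inputs):
        return tf.matmul(inputs, self.scale * self.w) + self.b

    def get_config(self):
        config = super().get_config()
        config.update({'units': self.units, 'gain': self.gain})
        return config  # <- the statement this record's completion supplies

layer = ScaledDense(units=4)
clone = ScaledDense.from_config(layer.get_config())  # round trip
print(clone.units, clone.gain)  # 4 2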
completion_python
GAN_model
248
248
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })']
[' return config']
[' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and 
shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 
'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = 
layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'config' used at line 248 is defined at line 241 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
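Across these records the equalized layers draw weights from N(0, 1) and multiply them by scale = sqrt(gain/fan_in) inside call (the matmul completion targeted by the next record). The quick numerical check below (illustrative only, not from the dataset) confirms that this runtime scaling reproduces He-style weight statistics for the 3x3x512 case used throughout.

# Sanity check: scaled N(0,1) weights have stddev ~= sqrt(gain/fan_in),
# i.e. the He-initialization stddev for this fan-in.
import tensorflow as tf

gain, kh, kw, n_channels = 2.0, 3, 3, 512
fan_in = kh * kw * n_channels
scale = tf.math.sqrt(gain / fan_in)

w = tf.random.normal((fan_in, 16), seed=42)   # stddev 1, as in the layers
print(float(tf.math.reduce_std(scale * w)))   # empirical, ~0.0208
print(float(tf.math.sqrt(gain / fan_in)))     # target He stddev, 0.0208...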
completion_python
GAN_model
251
251
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):']
[' return tf.matmul(inputs,self.scale*self.w) + self.b']
['', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = 
U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' 
center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = 
act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
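Both contexts quoted above make repeated use of minibatch_stddev_layer (it reappears in U_gen_top_init and final_block_disc). A standalone shape check of the quoted helper follows; the toy input is ours, and note that the bracketed comments in the source say NCHW while the code actually operates on NHWC tensors:

import tensorflow as tf

def minibatch_stddev(x, group_size=4):
    # Copied from the quoted helper: append one feature map holding the
    # group-wide standard deviation (Karras et al., Progressive GAN).
    group_size = tf.minimum(group_size, tf.shape(x)[0])
    s = x.shape
    y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]])
    y = tf.cast(y, tf.float32)
    y -= tf.reduce_mean(y, axis=0, keepdims=True)
    y = tf.reduce_mean(tf.square(y), axis=0)
    y = tf.sqrt(y + 1e-8)
    y = tf.reduce_mean(y, axis=[1, 2, 3], keepdims=True)
    y = tf.cast(y, x.dtype)
    y = tf.tile(y, [group_size, s[1], s[2], 1])
    return tf.concat([x, y], axis=3)

x = tf.random.normal((4, 8, 8, 3))
print(minibatch_stddev(x).shape)  # (4, 8, 8, 4): one extra stddev channel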
[]
Library 'tf' used at line 251 is imported at line 16 and has a Long-Range dependency.
Variable 'inputs' used at line 251 is defined at line 250 and has a Short-Range dependency.
Variable 'self' used at line 251 is defined at line 250 and has a Short-Range dependency.
Variable 'scale' used at line 251 is defined at line 223 and has a Medium-Range dependency.
Variable 'w' used at line 251 is defined at line 225 and has a Medium-Range dependency.
Variable 'b' used at line 251 is defined at line 233 and has a Medium-Range dependency.
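The range labels above come without stated thresholds. A hypothetical helper (name and cutoffs ours; the buckets are inferred from the distances in these annotations, with the 50-line Medium/Long boundary in particular being a guess) that reproduces every label seen here:

def dependency_range(use_line: int, def_line: int) -> str:
    # Inferred buckets: within ~10 lines looks Short-Range, a few dozen
    # Medium-Range, anything farther Long-Range.
    distance = use_line - def_line
    if distance <= 10:
        return 'Short-Range'
    if distance <= 50:
        return 'Medium-Range'
    return 'Long-Range'

print(dependency_range(251, 250))  # Short-Range, matches 'inputs' above
print(dependency_range(251, 223))  # Medium-Range, matches 'scale' above
print(dependency_range(251, 16))   # Long-Range, matches the 'tf' import above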
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2, 'Variable Medium-Range': 3}
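The completion target annotated above is the single matmul line of EqualizedDense. A minimal, self-contained sketch of just that layer under the same sqrt(gain/fan_in) convention quoted in the context (the 'Sketch' class name and the toy input at the bottom are ours):

import tensorflow as tf

class EqualizedDenseSketch(tf.keras.layers.Layer):
    def __init__(self, units=1, gain=2, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.gain = gain

    def build(self, input_shape):
        *_, n_channels = input_shape
        # Runtime scaling sqrt(gain / fan_in); for a dense layer fan_in is
        # just the input channel count, as in the quoted EqualizedDense.
        self.scale = tf.math.sqrt(self.gain / n_channels)
        self.w = self.add_weight(name='kernel', shape=(n_channels, self.units),
                                 initializer=tf.initializers.RandomNormal(seed=42),
                                 trainable=True, dtype=tf.float32)
        self.b = self.add_weight(name='bias', shape=(self.units,),
                                 initializer=tf.initializers.Zeros(),
                                 trainable=True, dtype=tf.float32)

    def call(self, inputs):
        # The completed line: scale the kernel at call time, never the bias.
        return tf.matmul(inputs, self.scale * self.w) + self.b

print(EqualizedDenseSketch(units=4)(tf.random.normal((2, 8))).shape)  # (2, 4)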
completion_python
GAN_model
291
299
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))']
[' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model']
['', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 
'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 
'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = 
additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
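The U_rgb_fadein and from_rgb_fadein blocks in the context above both hinge on the same blend, fade_in = (1 - alpha) * old + alpha * new, which eases a freshly grown resolution into the network. A tiny numeric illustration (function name and toy tensors are ours):

import tensorflow as tf

def fade_in(alpha, new_path, old_path):
    # alpha = 0 -> only the old (stable) path; alpha = 1 -> only the new path.
    return (1.0 - alpha) * old_path + alpha * new_path

new = tf.ones((1, 4, 4, 3))
old = tf.zeros((1, 4, 4, 3))
print(float(tf.reduce_mean(fade_in(0.25, new, old))))  # 0.25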
[]
Class 'EqualizedConv2D' used at line 291 is defined at line 83 and has a Long-Range dependency.
Variable 'filters' used at line 291 is defined at line 281 and has a Short-Range dependency.
Variable 'kernel_init' used at line 294 is defined at line 282 and has a Medium-Range dependency.
Variable 'x' used at line 294 is defined at line 290 and has a Short-Range dependency.
Variable 'norm_func' used at line 296 is defined at line 280 and has a Medium-Range dependency.
Variable 'act_func' used at line 296 is defined at line 279 and has a Medium-Range dependency.
Library 'tf' used at line 298 is imported at line 15 and has a Long-Range dependency.
Variable 'inputs' used at line 298 is defined at line 284 and has a Medium-Range dependency.
Variable 'x' used at line 298 is defined at line 296 and has a Short-Range dependency.
Variable 'model' used at line 299 is defined at line 298 and has a Short-Range dependency.
{}
{'Class Long-Range': 1, 'Variable Short-Range': 4, 'Variable Medium-Range': 4, 'Library Long-Range': 1}
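This record completes the tail of U_gen_bottom_init (source lines 291-299 per the metadata above): the second equalized conv, the norm_func(act_func(x)) wrap, and a Model returning [x, x]. A structural sketch with stock Keras layers standing in for the equalized ones (filter count shrunk to keep it cheap; the wiring mirrors the quoted block):

import tensorflow as tf
from tensorflow.keras import layers

def u_gen_bottom_init_sketch(filters=64):
    inputs = layers.Input(shape=(1, 1, filters))
    # 1x1 -> 4x4 via a stride-4 transpose convolution, as in the quoted block.
    x = layers.Conv2DTranspose(filters, (4, 4), strides=(4, 4), padding='same')(inputs)
    x = layers.LeakyReLU(0.2)(x)
    # 3x3 conv at the new resolution, then act + pixelnorm (the completed lines).
    x = layers.Conv2D(filters, (3, 3), padding='same')(x)
    x = layers.LeakyReLU(0.2)(x)
    x = layers.Lambda(
        lambda t: t / tf.math.sqrt(tf.reduce_mean(t**2 + 10e-8, axis=3, keepdims=True))
    )(x)
    # The same tensor is returned twice, mirroring the quoted [x, x] output:
    # one copy feeds the next generator stage, the other the U-connection skip.
    return tf.keras.Model(inputs, [x, x])

out = u_gen_bottom_init_sketch()(tf.random.normal((1, 1, 1, 64)))
print(out[0].shape, out[1].shape)  # (1, 4, 4, 64) (1, 4, 4, 64)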
completion_python
GAN_model
296
299
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ']
[' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model']
['', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 
'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 
'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = 
additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'norm_func' used at line 296 is defined at line 280 and has a Medium-Range dependency. Variable 'act_func' used at line 296 is defined at line 279 and has a Medium-Range dependency. Library 'tf' used at line 298 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 298 is defined at line 284 and has a Medium-Range dependency. Variable 'x' used at line 298 is defined at line 296 and has a Short-Range dependency. Variable 'model' used at line 299 is defined at line 298 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 3, 'Library Long-Range': 1, 'Variable Short-Range': 2}
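The record above completes the two closing lines of U_gen_bottom_init: a functional Keras Model whose output list repeats the same tensor, so every generator block, including the initial one, hands callers a (features, skip) pair. A minimal sketch of that multi-output pattern, using stock Keras layers and toy sizes rather than the dump's equalized layers (all names and shapes here are illustrative assumptions):

import tensorflow as tf
from tensorflow.keras import layers

# Toy stand-in for U_gen_bottom_init: grow a 1x1 latent to 4x4, then expose
# the same tensor twice so downstream U-blocks can unpack (features, skip).
inputs = layers.Input(shape=(1, 1, 8))
x = layers.Conv2DTranspose(8, kernel_size=(4, 4), strides=(4, 4))(inputs)
x = layers.LeakyReLU(0.2)(x)
model = tf.keras.models.Model(inputs, [x, x])

features, skip = model(tf.random.normal((1, 1, 1, 8)))
print(features.shape, skip.shape)  # both (1, 4, 4, 8)

Returning [x, x] keeps the initial block interface-compatible with the additional blocks, which return genuinely distinct [x, upsample] pairs.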
completion_python
GAN_model
298
299
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ']
[' model = tf.keras.models.Model(inputs, [x,x])', ' return model']
['', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 
'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 
'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = 
additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 298 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 298 is defined at line 284 and has a Medium-Range dependency. Variable 'x' used at line 298 is defined at line 296 and has a Short-Range dependency. Variable 'model' used at line 299 is defined at line 298 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 2}
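The horizon_categories_output and horizon_freq_analysis fields above bucket each use/definition pair by how many lines separate them. The dump never states the cutoffs, so the following sketch is a hedged reconstruction whose thresholds are assumptions chosen to reproduce every label visible in this section (gaps of 8-9 lines are tagged Short-Range, 13-16 Medium-Range, 245+ Long-Range):

from collections import Counter

def horizon(def_line, use_line, short=10, medium=40):
    # Assumed cutoffs (not stated in the dump): <10 lines Short, <40 Medium, else Long.
    gap = use_line - def_line
    if gap < short:
        return "Short-Range"
    if gap < medium:
        return "Medium-Range"
    return "Long-Range"

# Use/definition pairs copied from this record's horizon_categories_output field.
pairs = [("Library", "tf", 15, 298),
         ("Variable", "inputs", 284, 298),
         ("Variable", "x", 296, 298),
         ("Variable", "model", 298, 299)]
freq = Counter(f"{kind} {horizon(d, u)}" for kind, _, d, u in pairs)
print(dict(freq))
# {'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 2}

The printed counter matches this record's horizon_freq_analysis field exactly, and the same assumed cutoffs also reproduce the labels in the neighbouring records.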
completion_python
GAN_model
328
339
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ']
[' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ']
['', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', 
'', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' 
final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 
512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'norm_func' used at line 328 is defined at line 317 and has a Medium-Range dependency. Variable 'act_func' used at line 328 is defined at line 316 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 328 is defined at line 83 and has a Long-Range dependency. Variable 'filters' used at line 328 is defined at line 320 and has a Short-Range dependency. Variable 'kernel_init' used at line 331 is defined at line 322 and has a Short-Range dependency. Variable 'upsample' used at line 331 is defined at line 326 and has a Short-Range dependency. Variable 'norm_func' used at line 333 is defined at line 317 and has a Medium-Range dependency. Variable 'act_func' used at line 333 is defined at line 316 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 333 is defined at line 83 and has a Long-Range dependency. Variable 'filters' used at line 333 is defined at line 320 and has a Medium-Range dependency. Variable 'kernel_init' used at line 336 is defined at line 322 and has a Medium-Range dependency. Variable 'x' used at line 336 is defined at line 333 and has a Short-Range dependency. Library 'tf' used at line 338 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 338 is defined at line 324 and has a Medium-Range dependency. Variable 'x' used at line 338 is defined at line 333 and has a Short-Range dependency. Variable 'upsample' used at line 338 is defined at line 326 and has a Medium-Range dependency. Variable 'model' used at line 339 is defined at line 338 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 8, 'Class Long-Range': 2, 'Variable Short-Range': 6, 'Library Long-Range': 1}
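The two records above both target the body of U_gen_bottom_add: two norm_func(act_func(EqualizedConv2D(...))) stages applied to an upsampled input, with the Model returning the processed features alongside the raw upsample as a skip path. A compact stand-in with stock Keras layers (toy channel count; LeakyReLU for act_func; the pixelwise normalization written out inline, with the epsilon placed outside the mean, a slight departure from the dump's pixelnorm helper):

import tensorflow as tf
from tensorflow.keras import layers

def pixelnorm(x, eps=1e-8):
    # Normalize each pixel's feature vector to roughly unit RMS over channels.
    return x / tf.math.sqrt(tf.reduce_mean(tf.square(x), axis=-1, keepdims=True) + eps)

def tiny_bottom_add(input_shape=(4, 4), filters=8):
    # Toy stand-in for U_gen_bottom_add: upsample, two norm(act(conv)) stages,
    # and a two-output Model exposing (features, skip).
    inputs = layers.Input(shape=(*input_shape, filters))
    up = layers.UpSampling2D(size=(2, 2), interpolation="nearest")(inputs)
    x = pixelnorm(layers.LeakyReLU(0.2)(layers.Conv2D(filters, 3, padding="same")(up)))
    x = pixelnorm(layers.LeakyReLU(0.2)(layers.Conv2D(filters, 3, padding="same")(x)))
    return tf.keras.models.Model(inputs, [x, up])

features, skip = tiny_bottom_add()(tf.random.normal((1, 4, 4, 8)))
print(features.shape, skip.shape)  # (1, 8, 8, 8) (1, 8, 8, 8)

The real blocks swap Conv2D for EqualizedConv2D, which rescales its kernel at call time by sqrt(gain / (kernel_h * kernel_w * in_channels)), the equalized learning-rate trick from Progressive GAN that the dump's layer classes implement.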
completion_python
GAN_model
333
339
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ']
[' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ']
['', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', 
'', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' 
final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 
512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'norm_func' used at line 333 is defined at line 317 and has a Medium-Range dependency. Variable 'act_func' used at line 333 is defined at line 316 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 333 is defined at line 83 and has a Long-Range dependency. Variable 'filters' used at line 333 is defined at line 320 and has a Medium-Range dependency. Variable 'kernel_init' used at line 336 is defined at line 322 and has a Medium-Range dependency. Variable 'x' used at line 336 is defined at line 333 and has a Short-Range dependency. Library 'tf' used at line 338 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 338 is defined at line 324 and has a Medium-Range dependency. Variable 'x' used at line 338 is defined at line 333 and has a Short-Range dependency. Variable 'upsample' used at line 338 is defined at line 326 and has a Medium-Range dependency. Variable 'model' used at line 339 is defined at line 338 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 6, 'Class Long-Range': 1, 'Variable Short-Range': 3, 'Library Long-Range': 1}
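One technique recurs through every code context above and is worth isolating: the equalized learning rate from Progressive GAN. Each custom layer draws its weights from N(0, 1) and rescales them at call time by sqrt(gain / fan_in), leaving the bias unscaled. The following is a minimal sketch of that rule outside the layer classes; the filter count of 16 and the 8x8 input are arbitrary test values, not taken from the records.

import tensorflow as tf

gain = 2.0
kernel_h, kernel_w, n_channels = 3, 3, 512
fan_in = kernel_h * kernel_w * n_channels          # kh * kw * C, as in EqualizedConv2D.build
scale = tf.math.sqrt(gain / fan_in)                # ~0.0208 for a 3x3 kernel over 512 channels

w = tf.random.normal((kernel_h, kernel_w, n_channels, 16), seed=42)   # unit-variance weights
x = tf.random.normal((1, 8, 8, n_channels), seed=42)
y = tf.nn.conv2d(x, filters=scale * w, strides=(1, 1), padding='SAME')  # scale applied per call
print(y.shape)  # (1, 8, 8, 16)

The optimizer updates unit-variance parameters while the forward pass behaves as if the kernel had been He-initialized, which is the point of the trick.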
completion_python
GAN_model
338
339
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ']
[' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ']
['', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', 
'', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' 
final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 
512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 338 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 338 is defined at line 324 and has a Medium-Range dependency. Variable 'x' used at line 338 is defined at line 333 and has a Short-Range dependency. Variable 'upsample' used at line 338 is defined at line 326 and has a Medium-Range dependency. Variable 'model' used at line 339 is defined at line 338 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 2, 'Variable Short-Range': 2}
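The horizon_categories_output fields above follow a fixed pattern: each entry records the line where a name is used, the line where it is defined or imported, and a Short-, Medium- or Long-Range bucket. The cutoffs are never stated in this file. The distances shown are consistent with thresholds of 10 and 30 lines, so the helper below is a hypothetical reconstruction under that assumption, checked against three labels from the record above (start_line 338, end_line 339).

def dependency_range(use_line: int, def_line: int) -> str:
    # Assumed cutoffs (10 and 30 lines), inferred from the labelled
    # distances in this section; they are not documented in the file.
    distance = use_line - def_line
    if distance <= 10:
        return 'Short-Range'
    if distance <= 30:
        return 'Medium-Range'
    return 'Long-Range'

assert dependency_range(339, 338) == 'Short-Range'   # 'model' defined one line earlier
assert dependency_range(338, 324) == 'Medium-Range'  # 'inputs' defined 14 lines earlier
assert dependency_range(338, 15) == 'Long-Range'     # 'tf' imported at line 15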
completion_python
GAN_model
370
381
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)']
[' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' 
fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 
'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'act_func' used at line 370 is defined at line 358 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 372 is defined at line 83 and has a Long-Range dependency. Variable 'filters' used at line 372 is defined at line 359 and has a Medium-Range dependency. Variable 'kernel_init' used at line 375 is defined at line 360 and has a Medium-Range dependency. Variable 'x' used at line 375 is defined at line 370 and has a Short-Range dependency. Variable 'act_func' used at line 378 is defined at line 358 and has a Medium-Range dependency. Variable 'x' used at line 378 is defined at line 372 and has a Short-Range dependency. Library 'tf' used at line 380 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 380 is defined at line 362 and has a Medium-Range dependency. Variable 'x' used at line 380 is defined at line 378 and has a Short-Range dependency. Variable 'model' used at line 381 is defined at line 380 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 5, 'Class Long-Range': 1, 'Variable Short-Range': 4, 'Library Long-Range': 1}
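Reader's note: the GAN_model records in this stretch all complete the tail of the same function, U_gen_top_init, and the dependency annotations refer to that function's source line numbers. As a readability aid, the block is reconstructed below as plain Python from the quoted 'before'/'between' fields; it is a sketch, not part of any record, and it relies on EqualizedConv2D, minibatch_stddev_layer, kernel_init, layers, and tf exactly as defined/imported earlier in the quoted file. Trailing comments give the source line numbers cited by the annotations.

def U_gen_top_init(
    act_func,                                         # defined at line 358
    filters=512,                                      # defined at line 359
    kernel_init=kernel_init                           # defined at line 360
):
    inputs = layers.Input(shape=(4, 4, filters))      # line 362
    x = minibatch_stddev_layer(inputs, group_size=4)
    x = EqualizedConv2D(filters,                      # class defined at line 83
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        kernel_initializer=kernel_init)(x)
    x = act_func(x)                                   # line 370
    x = EqualizedConv2D(filters,                      # line 372
                        kernel_size=(4, 4),
                        strides=(4, 4),
                        kernel_initializer=kernel_init)(x)  # line 375
    x = act_func(x)                                   # line 378
    model = tf.keras.models.Model(inputs, x)          # line 380; 'tf' imported at line 15
    return model                                      # line 381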
completion_python
GAN_model
372
381
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ']
[' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' 
fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 
'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Class 'EqualizedConv2D' used at line 372 is defined at line 83 and has a Long-Range dependency. Variable 'filters' used at line 372 is defined at line 359 and has a Medium-Range dependency. Variable 'kernel_init' used at line 375 is defined at line 360 and has a Medium-Range dependency. Variable 'x' used at line 375 is defined at line 370 and has a Short-Range dependency. Variable 'act_func' used at line 378 is defined at line 358 and has a Medium-Range dependency. Variable 'x' used at line 378 is defined at line 372 and has a Short-Range dependency. Library 'tf' used at line 380 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 380 is defined at line 362 and has a Medium-Range dependency. Variable 'x' used at line 380 is defined at line 378 and has a Short-Range dependency. Variable 'model' used at line 381 is defined at line 380 and has a Short-Range dependency.
{}
{'Class Long-Range': 1, 'Variable Medium-Range': 4, 'Variable Short-Range': 4, 'Library Long-Range': 1}
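The record above completes lines 372-381, and its 'between' field is exactly that span: len(between) == end_line - start_line + 1 (10 lines here; the two records below carry 4 and 2). For anyone consuming this dump, a minimal stitching sketch, assuming the field names shown in these records and that the three code fields are lists of source lines:

def assemble(record):
    # Reconstruct the full source file from one record's three code fields.
    return "\n".join(record["before"] + record["between"] + record["after"])

def gold_completion(record):
    # The gold completion covers start_line..end_line inclusive, which is
    # precisely the 'between' field; the assert documents that invariant
    # (it holds for all three GAN_model records in this stretch).
    assert len(record["between"]) == record["end_line"] - record["start_line"] + 1
    return "\n".join(record["between"])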
completion_python
GAN_model
378
381
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ']
[' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' 
fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 
'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'act_func' used at line 378 is defined at line 358 and has a Medium-Range dependency. Variable 'x' used at line 378 is defined at line 372 and has a Short-Range dependency. Library 'tf' used at line 380 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 380 is defined at line 362 and has a Medium-Range dependency. Variable 'x' used at line 380 is defined at line 378 and has a Short-Range dependency. Variable 'model' used at line 381 is defined at line 380 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 2, 'Variable Short-Range': 3, 'Library Long-Range': 1}
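As in the earlier records, the horizon_freq_analysis dict is a tally of the category phrases in horizon_categories_output: the record above lists 2 Medium-Range and 3 Short-Range variable uses plus 1 Long-Range library use, matching its dict exactly. A sketch of recomputing that tally; the regex encodes the sentence pattern visible in these records and is an assumption rather than a documented schema:

import re
from collections import Counter

def horizon_freq(text):
    # Find pairs like ("Variable", "Short") from sentences of the form
    # "Variable 'x' used at line N ... has a Short-Range dependency."
    pairs = re.findall(
        r"(Variable|Class|Library|Function)\s+'[^']+'.*?"
        r"has a (Short|Medium|Long)-Range dependency",
        text,
    )
    return dict(Counter(f"{kind} {rng}-Range" for kind, rng in pairs))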
completion_python
GAN_model
380
381
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ']
[' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' 
fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 
'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 380 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 380 is defined at line 362 and has a Medium-Range dependency. Variable 'x' used at line 380 is defined at line 378 and has a Short-Range dependency. Variable 'model' used at line 381 is defined at line 380 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 2}
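The 'horizon_freq_analysis' dictionary above is just a tally of the kind/range pairs named in the 'horizon_categories_output' sentences of the same row. A minimal sketch of that tally, assuming every annotation sentence follows the pattern visible in these rows; the function name horizon_freq is illustrative, not part of the dataset:

import re
from collections import Counter

def horizon_freq(annotation: str) -> dict:
    # Each sentence names a kind (Library/Variable/Class/Function), a use line,
    # a definition or import line, and a range label; only the kind and the
    # range label contribute to the tally.
    pattern = re.compile(
        r"(Library|Variable|Class|Function) '[^']+' used at line \d+ "
        r"is (?:defined|imported) at line \d+ and has a "
        r"(Short|Medium|Long)-Range dependency\."
    )
    return dict(Counter(f"{kind} {rng}-Range"
                        for kind, rng in pattern.findall(annotation)))

# Applied to the annotation two rows up, this returns
# {'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 2},
# matching the stored dictionary.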
completion_python
GAN_model
405
418
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):']
[' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' 
kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = 
layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 405 is imported at line 11 and has a Long-Range dependency. Variable 'image_shape' used at line 405 is defined at line 402 and has a Short-Range dependency. Variable 'filters1' used at line 405 is defined at line 400 and has a Short-Range dependency. Variable 'act_func' used at line 407 is defined at line 398 and has a Short-Range dependency. Class 'EqualizedConv2D' used at line 407 is defined at line 83 and has a Long-Range dependency. Variable 'filters1' used at line 407 is defined at line 400 and has a Short-Range dependency. Variable 'kernel_init' used at line 410 is defined at line 403 and has a Short-Range dependency. Variable 'inputs' used at line 410 is defined at line 405 and has a Short-Range dependency. Variable 'act_func' used at line 411 is defined at line 398 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 411 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 411 is defined at line 401 and has a Short-Range dependency. Variable 'kernel_init' used at line 414 is defined at line 403 and has a Medium-Range dependency. Variable 'x' used at line 414 is defined at line 407 and has a Short-Range dependency. Variable 'downsample_func' used at line 415 is defined at line 399 and has a Medium-Range dependency. Variable 'x' used at line 415 is defined at line 411 and has a Short-Range dependency. Library 'tf' used at line 417 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 417 is defined at line 405 and has a Medium-Range dependency. Variable 'x' used at line 417 is defined at line 415 and has a Short-Range dependency. Variable 'model' used at line 418 is defined at line 417 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 11, 'Class Long-Range': 2, 'Variable Medium-Range': 4}
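The Short-/Medium-/Long-Range labels encode the distance between the line where a symbol is used and the line where it is defined or imported. The dump does not state the exact cut-offs; the thresholds below (at most 10 lines for Short, at most 30 for Medium) are inferred from the distances visible in these rows and are an assumption, not a documented rule:

def range_label(use_line: int, def_line: int) -> str:
    # Inferred cut-offs: every Short-Range pair in these rows spans at most
    # 10 lines and every Medium-Range pair spans 11 to 18 lines, so the
    # Medium upper bound of 30 is a guess.
    distance = use_line - def_line
    if distance <= 10:
        return "Short-Range"
    if distance <= 30:
        return "Medium-Range"
    return "Long-Range"

# range_label(411, 398) -> 'Medium-Range', agreeing with the act_func entry above.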
completion_python
GAN_model
407
418
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ']
[' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' 
kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = 
layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'act_func' used at line 407 is defined at line 398 and has a Short-Range dependency. Class 'EqualizedConv2D' used at line 407 is defined at line 83 and has a Long-Range dependency. Variable 'filters1' used at line 407 is defined at line 400 and has a Short-Range dependency. Variable 'kernel_init' used at line 410 is defined at line 403 and has a Short-Range dependency. Variable 'inputs' used at line 410 is defined at line 405 and has a Short-Range dependency. Variable 'act_func' used at line 411 is defined at line 398 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 411 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 411 is defined at line 401 and has a Short-Range dependency. Variable 'kernel_init' used at line 414 is defined at line 403 and has a Medium-Range dependency. Variable 'x' used at line 414 is defined at line 407 and has a Short-Range dependency. Variable 'downsample_func' used at line 415 is defined at line 399 and has a Medium-Range dependency. Variable 'x' used at line 415 is defined at line 411 and has a Short-Range dependency. Library 'tf' used at line 417 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 417 is defined at line 405 and has a Medium-Range dependency. Variable 'x' used at line 417 is defined at line 415 and has a Short-Range dependency. Variable 'model' used at line 418 is defined at line 417 and has a Short-Range dependency.
{}
{'Variable Short-Range': 9, 'Class Long-Range': 2, 'Variable Medium-Range': 4, 'Library Long-Range': 1}
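Each complete row carries enough to rebuild the full source file: 'before' holds the lines preceding start_line, 'between' holds lines start_line through end_line (in the rows above, len(between) == end_line - start_line + 1: 14 lines for 405-418, 12 for 407-418), and 'after' holds the remainder. A sketch of that reassembly, assuming the row has been parsed into a dict keyed by the column names:

def reassemble(row: dict) -> str:
    before, between, after = row["before"], row["between"], row["after"]
    # The completion span must cover exactly end_line - start_line + 1 lines;
    # this holds for every complete row in this dump.
    assert len(between) == row["end_line"] - row["start_line"] + 1
    return "\n".join(before + between + after)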
completion_python
GAN_model
411
418
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))']
[' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' 
kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = 
layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'act_func' used at line 411 is defined at line 398 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 411 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 411 is defined at line 401 and has a Short-Range dependency. Variable 'kernel_init' used at line 414 is defined at line 403 and has a Medium-Range dependency. Variable 'x' used at line 414 is defined at line 407 and has a Short-Range dependency. Variable 'downsample_func' used at line 415 is defined at line 399 and has a Medium-Range dependency. Variable 'x' used at line 415 is defined at line 411 and has a Short-Range dependency. Library 'tf' used at line 417 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 417 is defined at line 405 and has a Medium-Range dependency. Variable 'x' used at line 417 is defined at line 415 and has a Short-Range dependency. Variable 'model' used at line 418 is defined at line 417 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 4, 'Class Long-Range': 1, 'Variable Short-Range': 5, 'Library Long-Range': 1}
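The record above shows the relationship between the two annotation fields: 'horizon_categories_output' is a run of templated English sentences, and 'horizon_freq_analysis' is the per-category tally of those sentences. Below is a minimal sketch (not part of the dataset's own tooling) that recomputes the tally from the sentence text; the regex assumes the exact sentence template seen in these records ("<Kind> '<name>' used at line N ... has a <Band>-Range dependency"), and the 'Function' alternative is included only as a guess about kinds that may appear elsewhere.

import re
from collections import Counter

def horizon_freq(horizon_text: str) -> dict:
    # Matches e.g. "Variable 'x' used at line 415 is defined at line 411
    # and has a Short-Range dependency." Also covers the "is imported at"
    # wording used for libraries, since the middle of the sentence is
    # matched non-greedily.
    pattern = re.compile(
        r"(Variable|Class|Function|Library)\s+'[^']+'\s+used at line \d+"
        r".*?has a (Short|Medium|Long)-Range dependency"
    )
    counts = Counter(
        f"{kind} {band}-Range" for kind, band in pattern.findall(horizon_text)
    )
    return dict(counts)

Applied to the 'horizon_categories_output' of the record above, this reproduces its 'horizon_freq_analysis' value: {'Variable Medium-Range': 4, 'Class Long-Range': 1, 'Variable Short-Range': 5, 'Library Long-Range': 1}.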
completion_python
GAN_model
415
418
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))']
[' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' 
kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = 
layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'downsample_func' used at line 415 is defined at line 399 and has a Medium-Range dependency. Variable 'x' used at line 415 is defined at line 411 and has a Short-Range dependency. Library 'tf' used at line 417 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 417 is defined at line 405 and has a Medium-Range dependency. Variable 'x' used at line 417 is defined at line 415 and has a Short-Range dependency. Variable 'model' used at line 418 is defined at line 417 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 2, 'Variable Short-Range': 3, 'Library Long-Range': 1}
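The records never state the numeric cut-offs behind the Short/Medium/Long-Range labels, but the distances that do appear are consistent: 3, 5, 10 map to Short; 11, 12, 13, 16 map to Medium; 328 and 402 map to Long. The sketch below encodes thresholds that fit those observations (<=10 Short, <=30 Medium); the Medium/Long boundary in particular is an assumption, since no distance between 17 and 327 occurs in this section.

def dependency_range(use_line: int, def_line: int,
                     short_max: int = 10, medium_max: int = 30) -> str:
    # Band a use/definition pair by line distance. The thresholds are
    # assumptions consistent with, but not proven by, the records above.
    distance = use_line - def_line
    if distance <= short_max:
        return "Short-Range"
    if distance <= medium_max:
        return "Medium-Range"
    return "Long-Range"

# e.g. dependency_range(415, 399) -> 'Medium-Range' (distance 16), matching
# the 'downsample_func' annotation in the record above, and
# dependency_range(417, 15) -> 'Long-Range', matching the 'tf' annotation.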
completion_python
GAN_model
437
438
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ']
[' x = top(inputs)', ' x = bottom(x)']
['', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' 
final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 
512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
reason_categories_output: [{'reason_category': 'If Body', 'usage_line': 437}, {'reason_category': 'If Body', 'usage_line': 438}]
horizon_categories_output: Variable 'top' used at line 437 is defined at line 432 and has a Short-Range dependency. Variable 'inputs' used at line 437 is defined at line 434 and has a Short-Range dependency. Variable 'bottom' used at line 438 is defined at line 432 and has a Short-Range dependency. Variable 'x' used at line 438 is defined at line 437 and has a Short-Range dependency.
reason_freq_analysis: {'If Body': 2}
horizon_freq_analysis: {'Variable Short-Range': 4}
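For reference, here is a minimal, self-contained sketch of the U_connect skeleton that this row's completion (the two If Body lines at 437 and 438) slots into; the identity Lambda blocks and the smoke test at the end are hypothetical stand-ins for illustration, not the dataset's generator halves.

    import tensorflow as tf
    from tensorflow.keras import layers

    def U_connect(top, bottom, center=None, input_shape=(4, 4), filters=512):
        inputs = layers.Input(shape=(input_shape[0], input_shape[1], filters))
        if center is None:
            # the completed lines: 'top' and 'bottom' come from the signature
            # (line 432) and 'inputs' from line 434, hence four Short-Range uses
            x = top(inputs)
            x = bottom(x)
        else:
            h = top(inputs)
            x, _ = center(h)
            x = x + h
            x = bottom(x)
        return tf.keras.models.Model(inputs, x)

    identity = layers.Lambda(lambda t: t)      # hypothetical stand-in block
    m = U_connect(identity, identity)
    print(m(tf.zeros((1, 4, 4, 512))).shape)   # (1, 4, 4, 512)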
task_type: completion_python
code_task: GAN_model
start_line: 441
end_line: 444
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:']
[' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)']
[' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', 
'#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' 
from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
reason_categories_output: [{'reason_category': 'Else Reasoning', 'usage_line': 441}, {'reason_category': 'Else Reasoning', 'usage_line': 442}, {'reason_category': 'Else Reasoning', 'usage_line': 443}, {'reason_category': 'Else Reasoning', 'usage_line': 444}]
horizon_categories_output: Variable 'top' used at line 441 is defined at line 432 and has a Short-Range dependency. Variable 'inputs' used at line 441 is defined at line 434 and has a Short-Range dependency. Variable 'center' used at line 442 is defined at line 432 and has a Short-Range dependency. Variable 'h' used at line 442 is defined at line 441 and has a Short-Range dependency. Variable 'h' used at line 443 is defined at line 441 and has a Short-Range dependency. Variable 'x' used at line 443 is defined at line 442 and has a Short-Range dependency. Variable 'bottom' used at line 444 is defined at line 432 and has a Medium-Range dependency.
reason_freq_analysis: {'Else Reasoning': 4}
horizon_freq_analysis: {'Variable Short-Range': 6, 'Variable Medium-Range': 1}
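This row's completion is the Else Reasoning branch (lines 441 to 444). The sketch below traces that branch's dataflow step by step; center_stub, which returns a pair in the style of the dataset's two-output generator blocks, and the identity layers are invented for illustration.

    import tensorflow as tf
    from tensorflow.keras import layers

    def center_stub(h):
        # mimic the dataset's center models, which return (processed, auxiliary)
        return 0.5 * h, h

    identity = layers.Lambda(lambda t: t)
    inputs = layers.Input(shape=(4, 4, 512))
    h = identity(inputs)     # h = top(inputs)    (line 441)
    x, _ = center_stub(h)    # x, _ = center(h)   (line 442)
    x = x + h                # skip connection    (line 443)
    x = identity(x)          # x = bottom(x)      (line 444: the Medium-Range use)
    model = tf.keras.models.Model(inputs, x)
    print(model(tf.ones((1, 4, 4, 512))).shape)   # (1, 4, 4, 512)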
task_type: completion_python
code_task: GAN_model
start_line: 434
end_line: 447
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ']
[' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous 
instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 
'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
reason_categories_output: [{'reason_category': 'If Condition', 'usage_line': 436}, {'reason_category': 'If Body', 'usage_line': 437}, {'reason_category': 'If Body', 'usage_line': 438}, {'reason_category': 'Else Reasoning', 'usage_line': 440}, {'reason_category': 'Else Reasoning', 'usage_line': 441}, {'reason_category': 'Else Reasoning', 'usage_line': 442}, {'reason_category': 'Else Reasoning', 'usage_line': 443}, {'reason_category': 'Else Reasoning', 'usage_line': 444}]
horizon_categories_output: Library 'layers' used at line 434 is imported at line 11 and has a Long-Range dependency. Variable 'input_shape' used at line 434 is defined at line 432 and has a Short-Range dependency. Variable 'filters' used at line 434 is defined at line 432 and has a Short-Range dependency. Variable 'center' used at line 436 is defined at line 432 and has a Short-Range dependency. Variable 'top' used at line 437 is defined at line 432 and has a Short-Range dependency. Variable 'inputs' used at line 437 is defined at line 434 and has a Short-Range dependency. Variable 'bottom' used at line 438 is defined at line 432 and has a Short-Range dependency. Variable 'x' used at line 438 is defined at line 437 and has a Short-Range dependency. Variable 'top' used at line 441 is defined at line 432 and has a Short-Range dependency. Variable 'inputs' used at line 441 is defined at line 434 and has a Short-Range dependency. Variable 'center' used at line 442 is defined at line 432 and has a Short-Range dependency. Variable 'h' used at line 442 is defined at line 441 and has a Short-Range dependency. Variable 'h' used at line 443 is defined at line 441 and has a Short-Range dependency. Variable 'x' used at line 443 is defined at line 442 and has a Short-Range dependency. Variable 'bottom' used at line 444 is defined at line 432 and has a Medium-Range dependency. Library 'tf' used at line 446 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 446 is defined at line 434 and has a Medium-Range dependency. Variable 'x' used at line 446 is defined at line 444 and has a Short-Range dependency. Variable 'model' used at line 447 is defined at line 446 and has a Short-Range dependency.
reason_freq_analysis: {'If Condition': 1, 'If Body': 2, 'Else Reasoning': 5}
horizon_freq_analysis: {'Library Long-Range': 2, 'Variable Short-Range': 15, 'Variable Medium-Range': 2}
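The Short-Range, Medium-Range and Long-Range labels and the two frequency fields are mechanical functions of the gap between definition line and use line. A sketch with assumed cutoffs (the dataset does not state them; 10 and 30 are simply chosen to be consistent with every row shown here) reproduces this record's horizon_freq_analysis from the triples listed above.

    from collections import Counter

    def dependency_range(def_line, use_line, short_max=10, medium_max=30):
        # assumed thresholds: gap <= 10 is Short, <= 30 is Medium, else Long
        gap = use_line - def_line
        if gap <= short_max:
            return 'Short-Range'
        return 'Medium-Range' if gap <= medium_max else 'Long-Range'

    # (kind, def_line, use_line) triples copied from horizon_categories_output
    uses = [('Library', 11, 434), ('Variable', 432, 434), ('Variable', 432, 434),
            ('Variable', 432, 436), ('Variable', 432, 437), ('Variable', 434, 437),
            ('Variable', 432, 438), ('Variable', 437, 438), ('Variable', 432, 441),
            ('Variable', 434, 441), ('Variable', 432, 442), ('Variable', 441, 442),
            ('Variable', 441, 443), ('Variable', 442, 443), ('Variable', 432, 444),
            ('Library', 15, 446), ('Variable', 434, 446), ('Variable', 444, 446),
            ('Variable', 446, 447)]
    freq = Counter(f'{kind} {dependency_range(d, u)}' for kind, d, u in uses)
    print(dict(freq))
    # {'Library Long-Range': 2, 'Variable Short-Range': 15, 'Variable Medium-Range': 2}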
task_type: completion_python
code_task: GAN_model
start_line: 440
end_line: 447
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '']
[' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous 
instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 
'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'Else Reasoning', 'usage_line': 440}, {'reason_category': 'Else Reasoning', 'usage_line': 441}, {'reason_category': 'Else Reasoning', 'usage_line': 442}, {'reason_category': 'Else Reasoning', 'usage_line': 443}, {'reason_category': 'Else Reasoning', 'usage_line': 444}]
Variable 'top' used at line 441 is defined at line 432 and has a Short-Range dependency. Variable 'inputs' used at line 441 is defined at line 434 and has a Short-Range dependency. Variable 'center' used at line 442 is defined at line 432 and has a Short-Range dependency. Variable 'h' used at line 442 is defined at line 441 and has a Short-Range dependency. Variable 'h' used at line 443 is defined at line 441 and has a Short-Range dependency. Variable 'x' used at line 443 is defined at line 442 and has a Short-Range dependency. Variable 'bottom' used at line 444 is defined at line 432 and has a Medium-Range dependency. Library 'tf' used at line 446 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 446 is defined at line 434 and has a Medium-Range dependency. Variable 'x' used at line 446 is defined at line 444 and has a Short-Range dependency. Variable 'model' used at line 447 is defined at line 446 and has a Short-Range dependency.
{'Else Reasoning': 5}
{'Variable Short-Range': 8, 'Variable Medium-Range': 2, 'Library Long-Range': 1}
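The record above annotates the else-branch of U_connect, where the center model's output is added back onto the skip tensor h before the bottom model runs: a U-Net-style residual connection. As a plain-Python illustration of that wiring, here is a minimal NumPy sketch; the stand-in callables are hypothetical placeholders, not the dataset's Keras sub-models.

import numpy as np

def u_connect_sketch(top, bottom, center=None):
    """Mirror of the U_connect control flow annotated above."""
    def forward(inputs):
        if center is None:
            # no center model: top feeds straight into bottom
            return bottom(top(inputs))
        h = top(inputs)        # encoder-side features
        x, _ = center(h)       # center returns (features, aux); aux unused
        x = x + h              # residual skip around the center
        return bottom(x)
    return forward

# hypothetical stand-ins for the real sub-models
top = lambda t: t * 2.0
center = lambda t: (t + 1.0, None)
bottom = lambda t: t - 3.0

f = u_connect_sketch(top, bottom, center)
print(f(np.ones((1, 4, 4, 512))).shape)  # (1, 4, 4, 512)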
completion_python
GAN_model
484
493
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:']
[' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w']
['', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' 
additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 484}, {'reason_category': 'If Body', 'usage_line': 485}, {'reason_category': 'If Body', 'usage_line': 486}, {'reason_category': 'If Body', 'usage_line': 487}, {'reason_category': 'If Body', 'usage_line': 488}, {'reason_category': 'If Body', 'usage_line': 489}, {'reason_category': 'If Body', 'usage_line': 490}, {'reason_category': 'If Body', 'usage_line': 491}, {'reason_category': 'If Body', 'usage_line': 492}, {'reason_category': 'If Body', 'usage_line': 493}]
Variable 'downsample_func' used at line 484 is defined at line 468 and has a Medium-Range dependency. Variable 'inputs' used at line 484 is defined at line 475 and has a Short-Range dependency. Variable 'act_func' used at line 486 is defined at line 467 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 486 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 486 is defined at line 471 and has a Medium-Range dependency. Variable 'kernel_init' used at line 489 is defined at line 472 and has a Medium-Range dependency. Variable 'h' used at line 489 is defined at line 484 and has a Short-Range dependency. Variable 'alpha' used at line 491 is defined at line 476 and has a Medium-Range dependency. Variable 'h' used at line 491 is defined at line 486 and has a Short-Range dependency. Variable 'x' used at line 491 is defined at line 482 and has a Short-Range dependency. Variable 'center' used at line 492 is defined at line 465 and has a Medium-Range dependency. Variable 'fade_in' used at line 492 is defined at line 491 and has a Short-Range dependency. Variable 'w' used at line 493 is defined at line 492 and has a Short-Range dependency.
{'If Body': 10}
{'Variable Medium-Range': 6, 'Variable Short-Range': 6, 'Class Long-Range': 1}
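The if-body annotated in this record implements the progressive-growing fade-in: the RGB input reaches the block through an old, downsampled path h and a new path x, and the two are blended by a scalar alpha that ramps from 0 to 1 during a resolution transition. Below is a minimal NumPy sketch of just that blend, with illustrative tensors in place of the record's Keras layers.

import numpy as np

def fade_in(h, x, alpha):
    """Linear blend: alpha=0 keeps the old path, alpha=1 keeps the new one."""
    return (1.0 - alpha) * h + alpha * x

h = np.zeros((1, 4, 4, 512))  # features from the downsampled input (old path)
x = np.ones((1, 4, 4, 512))   # features from the new, deeper block
for alpha in (0.0, 0.5, 1.0):
    print(alpha, fade_in(h, x, alpha).mean())  # 0.0, then 0.5, then 1.0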
completion_python
GAN_model
491
493
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '']
[' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w']
['', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' 
additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 491}, {'reason_category': 'If Body', 'usage_line': 492}, {'reason_category': 'If Body', 'usage_line': 493}]
Variable 'alpha' used at line 491 is defined at line 476 and has a Medium-Range dependency. Variable 'h' used at line 491 is defined at line 486 and has a Short-Range dependency. Variable 'x' used at line 491 is defined at line 482 and has a Short-Range dependency. Variable 'center' used at line 492 is defined at line 465 and has a Medium-Range dependency. Variable 'fade_in' used at line 492 is defined at line 491 and has a Short-Range dependency. Variable 'w' used at line 493 is defined at line 492 and has a Short-Range dependency.
{'If Body': 3}
{'Variable Medium-Range': 2, 'Variable Short-Range': 4}
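The horizon_freq_analysis dict closing each record is a tally of the dependency sentences in horizon_categories_output. The sketch below shows one plausible way to derive such a tally; the regex encodes an assumption about the sentence format inferred from this dump, not the dataset's actual tooling.

import re
from collections import Counter

# two sentences copied from the record above
sentences = (
    "Variable 'alpha' used at line 491 is defined at line 476 and has a "
    "Medium-Range dependency. Variable 'h' used at line 491 is defined at "
    "line 486 and has a Short-Range dependency."
)

# capture the dependency kind and its range from each sentence (assumed format)
pairs = re.findall(
    r"(Variable|Library|Class|Function) '[^']+' used.*?(Short|Medium|Long)-Range",
    sentences,
)
print(Counter(f"{kind} {rng}-Range" for kind, rng in pairs))
# Counter({'Variable Medium-Range': 1, 'Variable Short-Range': 1})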
completion_python
GAN_model
493
493
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)']
[' x = x+w']
['', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' 
additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 493}]
Variable 'w' used at line 493 is defined at line 492 and has a Short-Range dependency.
{'If Body': 1}
{'Variable Short-Range': 1}
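The horizon_categories_output fields in these rows label each use/definition pair as Short-, Medium- or Long-Range, and the labels appear to track the line distance between use and definition. A hypothetical reconstruction of that labelling is sketched below; the cut-offs (at most 10 lines for Short, at most 50 for Medium) are assumptions inferred from the distances visible in these rows (1, 5 and 9 -> Short; 15, 27 and 29 -> Medium; 415 and 488 -> Long) and are not documented anywhere in the dump.

# Hypothetical reconstruction of the range labelling; the thresholds are
# assumptions inferred from the rows above, not stated by the dataset.
def horizon_label(def_line, use_line, short_max=10, medium_max=50):
    distance = use_line - def_line
    if distance <= short_max:
        return 'Short-Range'
    if distance <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

print(horizon_label(492, 493))  # 'w': defined 492, used 493 -> Short-Range
print(horizon_label(476, 491))  # 'alpha': defined 476, used 491 -> Medium-Range
print(horizon_label(83, 498))   # 'EqualizedConv2D': defined 83, used 498 -> Long-Range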
completion_python
GAN_model
498
503
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:']
[' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)']
[' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 
'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 498}, {'reason_category': 'If Body', 'usage_line': 499}, {'reason_category': 'If Body', 'usage_line': 500}, {'reason_category': 'If Body', 'usage_line': 501}, {'reason_category': 'If Body', 'usage_line': 502}, {'reason_category': 'If Body', 'usage_line': 503}]
Class 'EqualizedConv2D' used at line 498 is defined at line 83 and has a Long-Range dependency. Variable 'kernel_init' used at line 501 is defined at line 472 and has a Medium-Range dependency. Variable 'x' used at line 501 is defined at line 495 and has a Short-Range dependency. Library 'tf' used at line 503 is imported at line 15 and has a Long-Range dependency. Variable 'fade_in' used at line 503 is defined at line 498 and has a Short-Range dependency.
{'If Body': 6}
{'Class Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 2, 'Library Long-Range': 1}
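Every convolution in the dumped contexts, including the 1x1 to-RGB layer completed in this row (source lines 498-503), goes through the custom EqualizedConv2D layer, which stores unit-variance weights and multiplies them at call time by sqrt(gain / fan_in), where fan_in = kernel_h * kernel_w * in_channels. A short NumPy check of that scale factor, mirroring only the arithmetic in the dumped build() method:

import numpy as np

# Equalized learning-rate scale as computed in the dumped EqualizedConv2D:
# weights keep unit variance and are rescaled per call (He-style scaling).
def equalized_scale(kernel_h, kernel_w, in_channels, gain=2.0):
    fan_in = kernel_h * kernel_w * in_channels
    return np.sqrt(gain / fan_in)

print(equalized_scale(3, 3, 512))  # ~0.0208 for the 3x3, 512-channel blocks
print(equalized_scale(1, 1, 512))  # ~0.0625 for the 1x1 to-RGB layer

Because the scale multiplies only the kernel (the bias is left unscaled, as the payload comments require), the effective per-layer update magnitude is equalized across layers with different fan-in.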
completion_python
GAN_model
505
515
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:']
between: [' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)']
after:
[' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = 
layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
reason_categories_output: [{'reason_category': 'Else Reasoning', 'usage_line': 505}, {'reason_category': 'Else Reasoning', 'usage_line': 506}, {'reason_category': 'Else Reasoning', 'usage_line': 507}, {'reason_category': 'Else Reasoning', 'usage_line': 508}, {'reason_category': 'Else Reasoning', 'usage_line': 509}, {'reason_category': 'Else Reasoning', 'usage_line': 510}, {'reason_category': 'Else Reasoning', 'usage_line': 511}, {'reason_category': 'Else Reasoning', 'usage_line': 512}, {'reason_category': 'Else Reasoning', 'usage_line': 513}, {'reason_category': 'Else Reasoning', 'usage_line': 514}, {'reason_category': 'Else Reasoning', 'usage_line': 515}]
horizon_categories_output: Class 'EqualizedConv2D' used at line 505 is defined at line 83 and has a Long-Range dependency. Variable 'kernel_init' used at line 508 is defined at line 472 and has a Long-Range dependency. Variable 'upsample' used at line 508 is defined at line 495 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 510 is defined at line 83 and has a Long-Range dependency. Variable 'kernel_init' used at line 513 is defined at line 472 and has a Long-Range dependency. Variable 'x' used at line 513 is defined at line 495 and has a Medium-Range dependency. Library 'tf' used at line 515 is imported at line 15 and has a Long-Range dependency. Variable 'alpha' used at line 515 is defined at line 476 and has a Long-Range dependency. Variable 'upsample' used at line 515 is defined at line 505 and has a Short-Range dependency. Variable 'x' used at line 515 is defined at line 510 and has a Short-Range dependency.
reason_freq_analysis: {'Else Reasoning': 11}
horizon_freq_analysis: {'Class Long-Range': 2, 'Variable Long-Range': 3, 'Variable Medium-Range': 2, 'Library Long-Range': 1, 'Variable Short-Range': 2}
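The horizon_categories_output field above pairs each symbol usage with its definition line and a range bucket (Short-, Medium-, or Long-Range). The cut-offs are not stated anywhere in the rows themselves; the sketch below is a minimal reconstruction with assumed thresholds of 10 and 30 lines, which is consistent with every distance visible in this record (5 and 10 map to Short-Range, 13 and 18 to Medium-Range, 36 and beyond to Long-Range).

def horizon_bucket(def_line: int, use_line: int) -> str:
    """Bucket a definition-use pair by line distance, mirroring the labels above.

    The 10/30 thresholds are an assumption inferred from this record's
    examples; the dataset dump does not document them.
    """
    distance = use_line - def_line
    if distance <= 10:
        return 'Short-Range'
    if distance <= 30:
        return 'Medium-Range'
    return 'Long-Range'

# e.g. horizon_bucket(505, 515) -> 'Short-Range'
#      horizon_bucket(495, 508) -> 'Medium-Range'
#      horizon_bucket(472, 508) -> 'Long-Range'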
task_type: completion_python
code_task: GAN_model
start_line: 515
end_line: 515
before:
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ']
between: [' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)']
after:
[' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = 
layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
reason_categories_output: [{'reason_category': 'Else Reasoning', 'usage_line': 515}]
horizon_categories_output: Library 'tf' used at line 515 is imported at line 15 and has a Long-Range dependency. Variable 'alpha' used at line 515 is defined at line 476 and has a Long-Range dependency. Variable 'upsample' used at line 515 is defined at line 505 and has a Short-Range dependency. Variable 'x' used at line 515 is defined at line 510 and has a Short-Range dependency.
reason_freq_analysis: {'Else Reasoning': 1}
horizon_freq_analysis: {'Library Long-Range': 1, 'Variable Long-Range': 1, 'Variable Short-Range': 2}
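The two *_freq_analysis fields appear to be simple tallies of the labels in the corresponding *_categories_output lists: the eleven 'Else Reasoning' usage entries in the earlier record collapse to {'Else Reasoning': 11}, and the single entry above to {'Else Reasoning': 1}. A minimal sketch of that aggregation (the function name is mine, not the dataset's):

from collections import Counter

def reason_freq(reason_categories_output: list) -> dict:
    # Count how often each reason label occurs across the usage lines.
    return dict(Counter(entry['reason_category'] for entry in reason_categories_output))

print(reason_freq([{'reason_category': 'Else Reasoning', 'usage_line': 515}]))
# {'Else Reasoning': 1}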
task_type: completion_python
code_task: GAN_model
start_line: 517
end_line: 519
before:
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ']
between: [' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ']
after:
['', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' 
x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
reason_categories_output: []
horizon_categories_output: Library 'tf' used at line 517 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 517 is defined at line 475 and has a Long-Range dependency. Variable 'alpha' used at line 517 is defined at line 476 and has a Long-Range dependency. Variable 'fade_in' used at line 517 is defined at line 515 and has a Short-Range dependency. Variable 'model' used at line 519 is defined at line 517 and has a Short-Range dependency.
reason_freq_analysis: {}
horizon_freq_analysis: {'Library Long-Range': 1, 'Variable Long-Range': 2, 'Variable Short-Range': 2}
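For completeness, this is how one row maps back to a runnable file: before, between, and after are lists of source lines split around the completion target, and start_line/end_line give the 1-indexed position of the between span in that file. A minimal sketch, assuming the three fields are plain line splits of the original script; the stray blank elements at the edges of some rows suggest the split is not perfectly clean, so treat the boundary handling as an assumption.

def reassemble(row: dict) -> str:
    # Rebuild the full source file from one dataset row by concatenating the
    # three context fields. Boundary conventions are inferred from the rows
    # above, not documented in the dump.
    return '\n'.join(row['before'] + row['between'] + row['after'])

# e.g. for the 517-519 row above, row['between'] holds the model-construction
# lines that a completion model is expected to produce.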
task_type: completion_python
code_task: GAN_model
start_line: 554
end_line: 564
before:
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ']
[' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = 
final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Class 'EqualizedConv2D' used at line 554 is defined at line 83 and has a Long-Range dependency. Variable 'filters' used at line 554 is defined at line 542 and has a Medium-Range dependency. Variable 'kernel_init' used at line 557 is defined at line 541 and has a Medium-Range dependency. Variable 'x' used at line 557 is defined at line 552 and has a Short-Range dependency. Variable 'act_func' used at line 560 is defined at line 540 and has a Medium-Range dependency. Class 'EqualizedDense' used at line 561 is defined at line 202 and has a Long-Range dependency. Variable 'x' used at line 561 is defined at line 650 and has a Long-Range dependency. Library 'tf' used at line 563 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 563 is defined at line 544 and has a Medium-Range dependency. Variable 'x' used at line 563 is defined at line 561 and has a Short-Range dependency. Variable 'model' used at line 564 is defined at line 563 and has a Short-Range dependency.
{}
{'Class Long-Range': 2, 'Variable Medium-Range': 4, 'Variable Short-Range': 3, 'Variable Long-Range': 1, 'Library Long-Range': 1}
completion_python
GAN_model
560
564
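The 'horizon_freq_analysis' field in the record above is simply a tally of the (kind, range) pairs named in its 'horizon_categories_output' sentences: two "Class ... Long-Range" sentences yield 'Class Long-Range': 2, and so on. A minimal sketch of that tally, assuming the sentence format seen in these records (the regex and the two-sentence sample string below are illustrative, not part of the dataset's actual tooling):

import re
from collections import Counter

# Sample input in the same format as the horizon_categories_output field above.
horizon = (
    "Class 'EqualizedConv2D' used at line 554 is defined at line 83 "
    "and has a Long-Range dependency. "
    "Variable 'filters' used at line 554 is defined at line 542 "
    "and has a Medium-Range dependency."
)

# Each sentence names a dependency kind and a range bucket; count the pairs.
pairs = re.findall(
    r"(Class|Variable|Library) '[^']+' used at line \d+ "
    r"is (?:defined|imported) at line \d+ and has a "
    r"(Short|Medium|Long)-Range dependency",
    horizon,
)
freq = Counter(f"{kind} {rng}-Range" for kind, rng in pairs)
print(dict(freq))  # {'Class Long-Range': 1, 'Variable Medium-Range': 1}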
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ']
[' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = 
final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'act_func' used at line 560 is defined at line 540 and has a Medium-Range dependency. Class 'EqualizedDense' used at line 561 is defined at line 202 and has a Long-Range dependency. Variable 'x' used at line 561 is defined at line 650 and has a Long-Range dependency. Library 'tf' used at line 563 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 563 is defined at line 544 and has a Medium-Range dependency. Variable 'x' used at line 563 is defined at line 561 and has a Short-Range dependency. Variable 'model' used at line 564 is defined at line 563 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 2, 'Class Long-Range': 1, 'Variable Long-Range': 1, 'Library Long-Range': 1, 'Variable Short-Range': 2}
completion_python
GAN_model
561
564
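The start_line/end_line pair above (561-564) marks the span the model must complete, and each dependency sentence pairs a use line inside that span with its definition line; the Short/Medium/Long buckets track the distance between the two. A sketch of the bucketing, assuming cutoffs of 10 and 30 lines (the exact thresholds are not stated anywhere in this dump; these values are merely consistent with the distances observed here, e.g. 2 lines -> Short-Range, 19 lines -> Medium-Range, 359 lines -> Long-Range):

def horizon_bucket(use_line, def_line, short_max=10, medium_max=30):
    # abs() also covers definitions below the use site, e.g. the record's
    # "Variable 'x' used at line 561 is defined at line 650".
    distance = abs(use_line - def_line)
    if distance <= short_max:
        return 'Short-Range'
    if distance <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

print(horizon_bucket(563, 561))   # Short-Range, as in the record above
print(horizon_bucket(563, 544))   # Medium-Range (distance 19)
print(horizon_bucket(561, 202))   # Long-Range (distance 359)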
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)']
[' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = 
final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Class 'EqualizedDense' used at line 561 is defined at line 202 and has a Long-Range dependency. Variable 'x' used at line 561 is defined at line 650 and has a Long-Range dependency. Library 'tf' used at line 563 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 563 is defined at line 544 and has a Medium-Range dependency. Variable 'x' used at line 563 is defined at line 561 and has a Short-Range dependency. Variable 'model' used at line 564 is defined at line 563 and has a Short-Range dependency.
{}
{'Class Long-Range': 1, 'Variable Long-Range': 1, 'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 2}
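The horizon entries above flag a Long-Range dependency on the EqualizedDense class quoted in the record's context. For orientation while reading these GAN_model records, here is a compact, runnable restatement of the equalized-learning-rate idea that class implements: weights are drawn from N(0, 1) and rescaled at call time by sqrt(gain / fan_in), while the bias is left unscaled. The class name, shapes, and the final print are illustrative only, not part of the dataset.

import tensorflow as tf

class TinyEqualizedDense(tf.keras.layers.Layer):
    # Illustrative condensation of the EqualizedDense class quoted in the
    # records here; not the dataset's own code.
    def __init__(self, units=1, gain=2, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.gain = gain

    def build(self, input_shape):
        n_channels = int(input_shape[-1])
        # Runtime scale sqrt(gain / fan_in), applied to the kernel only.
        self.scale = tf.math.sqrt(self.gain / n_channels)
        self.w = self.add_weight(name="kernel", shape=(n_channels, self.units),
                                 initializer=tf.initializers.RandomNormal(seed=42),
                                 trainable=True)
        self.b = self.add_weight(name="bias", shape=(self.units,),
                                 initializer=tf.initializers.Zeros(),
                                 trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.scale * self.w) + self.b

x = tf.random.normal((1, 4, 4, 512))
print(TinyEqualizedDense(1)(x).shape)  # (1, 4, 4, 1): one logit per spatial position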
completion_python
GAN_model
563
564
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ']
[' model = tf.keras.models.Model(inputs, x)', ' return model']
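These two lines are the record's target span (source lines 563–564): they close the functional-API graph of final_block_disc by wrapping it in a Model and returning it. Annotated with the dependency classes reported in the horizon field below (line numbers refer to the analyzed source file, not to this document):

    model = tf.keras.models.Model(inputs, x)  # 'tf': Library Long-Range (import, line 15); 'inputs': Variable Medium-Range (line 544); 'x': Variable Short-Range (line 561)
    return model                              # 'model': Variable Short-Range (line 563)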
['', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = 
final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 563 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 563 is defined at line 544 and has a Medium-Range dependency. Variable 'x' used at line 563 is defined at line 561 and has a Short-Range dependency. Variable 'model' used at line 564 is defined at line 563 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 2}
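Each horizon entry pairs a use line with a definition line and buckets the pair by distance. The cutoffs are not stated anywhere in this file; a minimal sketch consistent with every pair in these records (distances up to 9 are Short-Range, 11–19 Medium-Range, several hundred Long-Range) would be:

def classify_dependency(use_line: int, def_line: int) -> str:
    """Bucket a use/definition pair by line distance.

    The 10/50 cutoffs are assumptions inferred from the examples in
    this file, not documented values.
    """
    distance = use_line - def_line
    if distance <= 10:
        return "Short-Range"
    if distance < 50:
        return "Medium-Range"
    return "Long-Range"

# Pairs taken from the record above:
assert classify_dependency(563, 561) == "Short-Range"   # 'x'
assert classify_dependency(563, 544) == "Medium-Range"  # 'inputs'
assert classify_dependency(563, 15) == "Long-Range"     # 'tf'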
completion_python
GAN_model
589
602
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):']
[' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 589 is imported at line 11 and has a Long-Range dependency. Variable 'image_shape' used at line 589 is defined at line 586 and has a Short-Range dependency. Variable 'filters1' used at line 589 is defined at line 584 and has a Short-Range dependency. Variable 'act_func' used at line 591 is defined at line 582 and has a Short-Range dependency. Class 'EqualizedConv2D' used at line 591 is defined at line 83 and has a Long-Range dependency. Variable 'filters1' used at line 591 is defined at line 584 and has a Short-Range dependency. Variable 'kernel_init' used at line 594 is defined at line 587 and has a Short-Range dependency. Variable 'inputs' used at line 594 is defined at line 589 and has a Short-Range dependency. Variable 'act_func' used at line 595 is defined at line 582 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 595 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 595 is defined at line 585 and has a Short-Range dependency. Variable 'kernel_init' used at line 598 is defined at line 587 and has a Medium-Range dependency. Variable 'x' used at line 598 is defined at line 591 and has a Short-Range dependency. Variable 'downsample_func' used at line 599 is defined at line 583 and has a Medium-Range dependency. Variable 'x' used at line 599 is defined at line 595 and has a Short-Range dependency. Library 'tf' used at line 601 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 601 is defined at line 589 and has a Medium-Range dependency. Variable 'x' used at line 601 is defined at line 599 and has a Short-Range dependency. Variable 'model' used at line 602 is defined at line 601 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 11, 'Class Long-Range': 2, 'Variable Medium-Range': 4}
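The record above targets source lines 589–602, i.e. the whole body of additional_block_disc: two 3x3 convolutions followed by a 2x downsample, wrapped in a Model. A self-contained sketch of that pattern, with plain Conv2D standing in for the file's EqualizedConv2D and hypothetical default shapes:

import tensorflow as tf
from tensorflow.keras import layers

def disc_block_sketch(filters1=512, filters2=512, image_shape=(8, 8)):
    # conv -> conv -> average-pool, mirroring additional_block_disc;
    # Conv2D is a stand-in for the equalized layer used in the records.
    inputs = layers.Input(shape=(image_shape[0], image_shape[1], filters1))
    x = layers.LeakyReLU(0.2)(layers.Conv2D(filters1, (3, 3), padding="same")(inputs))
    x = layers.LeakyReLU(0.2)(layers.Conv2D(filters2, (3, 3), padding="same")(x))
    x = layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(x)
    return tf.keras.models.Model(inputs, x)

block = disc_block_sketch()
print(block(tf.random.normal((1, 8, 8, 512))).shape)  # (1, 4, 4, 512)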
completion_python
GAN_model
595
602
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))']
[' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'act_func' used at line 595 is defined at line 582 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 595 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 595 is defined at line 585 and has a Short-Range dependency. Variable 'kernel_init' used at line 598 is defined at line 587 and has a Medium-Range dependency. Variable 'x' used at line 598 is defined at line 591 and has a Short-Range dependency. Variable 'downsample_func' used at line 599 is defined at line 583 and has a Medium-Range dependency. Variable 'x' used at line 599 is defined at line 595 and has a Short-Range dependency. Library 'tf' used at line 601 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 601 is defined at line 589 and has a Medium-Range dependency. Variable 'x' used at line 601 is defined at line 599 and has a Short-Range dependency. Variable 'model' used at line 602 is defined at line 601 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 4, 'Class Long-Range': 1, 'Variable Short-Range': 5, 'Library Long-Range': 1}
completion_python
GAN_model
599
602
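The rows above all complete fragments of the same GAN_model source, whose central device is the equalized learning rate: kernels are initialized from N(0, 1) and rescaled at call time by sqrt(gain / fan_in), while the bias is left unscaled. A minimal sketch of that idea follows, assuming TensorFlow 2.x; the class name DemoEqualizedDense is illustrative and is not part of the source.

# Sketch only: mirrors the runtime-scaling pattern of the EqualizedDense layer
# quoted in the rows above, not a verbatim copy of it.
import tensorflow as tf

class DemoEqualizedDense(tf.keras.layers.Layer):
    def __init__(self, units, gain=2.0, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.gain = gain

    def build(self, input_shape):
        fan_in = int(input_shape[-1])
        # He-style constant applied dynamically at call time,
        # rather than baked into the weight initializer
        self.scale = tf.math.sqrt(self.gain / fan_in)
        self.w = self.add_weight(
            name="kernel", shape=(fan_in, self.units),
            initializer=tf.initializers.RandomNormal(seed=42), trainable=True)
        self.b = self.add_weight(
            name="bias", shape=(self.units,),
            initializer=tf.initializers.Zeros(), trainable=True)

    def call(self, x):
        # scale the kernel only; the bias is never scaled
        return tf.matmul(x, self.scale * self.w) + self.b

print(DemoEqualizedDense(4)(tf.ones((2, 8))).shape)  # (2, 4)

Because the scale depends only on fan_in and gain, every layer sees weights of comparable magnitude under plain SGD or Adam, which is the point of the technique.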
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))']
[' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'downsample_func' used at line 599 is defined at line 583 and has a Medium-Range dependency. Variable 'x' used at line 599 is defined at line 595 and has a Short-Range dependency. Library 'tf' used at line 601 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 601 is defined at line 589 and has a Medium-Range dependency. Variable 'x' used at line 601 is defined at line 599 and has a Short-Range dependency. Variable 'model' used at line 602 is defined at line 601 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 2, 'Variable Short-Range': 3, 'Library Long-Range': 1}
completion_python
GAN_model
601
602
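Several of the contexts in these rows (U_rgb_fadein, from_rgb_fadein) hinge on the progressive-growing fade-in, which linearly blends the established low-resolution pathway into a newly added block: out = (1 - alpha) * old + alpha * new, with alpha ramped from 0 to 1 during training. A small sketch under that assumption; the function name demo_fade_in is hypothetical.

import tensorflow as tf

def demo_fade_in(old_branch, new_branch, alpha):
    # alpha = 0 -> purely the old (upsampled low-resolution) pathway,
    # alpha = 1 -> purely the newly grown block
    return (1.0 - alpha) * old_branch + alpha * new_branch

old = tf.zeros((1, 8, 8, 3))
new = tf.ones((1, 8, 8, 3))
for a in (0.0, 0.5, 1.0):
    print(a, float(tf.reduce_mean(demo_fade_in(old, new, a))))
# prints 0.0, 0.5, 1.0: the output moves smoothly from old to new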
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ']
[' model = tf.keras.models.Model(inputs, x)', ' return model']
['', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 601 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 601 is defined at line 589 and has a Medium-Range dependency. Variable 'x' used at line 601 is defined at line 599 and has a Short-Range dependency. Variable 'model' used at line 602 is defined at line 601 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 2}
completion_python
GAN_model
620
626
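The discriminator contexts in these rows reuse minibatch_stddev_layer, which appends one feature map carrying a batch-wide standard-deviation statistic so the critic can penalize generators that produce low-variety batches. A compact single-group variant is sketched below for illustration only; demo_minibatch_stddev is not a name from the source, and the quoted layer additionally splits the batch into groups of 4.

import tensorflow as tf

def demo_minibatch_stddev(x):
    # per-pixel, per-channel std over the batch, averaged to one scalar,
    # then broadcast as a single extra feature map
    std = tf.math.reduce_std(x, axis=0)        # (H, W, C)
    stat = tf.reduce_mean(std)                 # scalar summary of batch variety
    fmap = tf.fill(tf.shape(x[..., :1]), stat) # (N, H, W, 1)
    return tf.concat([x, fmap], axis=-1)

x = tf.random.normal((4, 4, 4, 8), seed=42)
print(demo_minibatch_stddev(x).shape)  # (4, 4, 4, 9): one channel added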
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ']
[' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'layers' used at line 620 is imported at line 11 and has a Long-Range dependency. Variable 'input_shape' used at line 620 is defined at line 618 and has a Short-Range dependency. Variable 'filter2' used at line 620 is defined at line 618 and has a Short-Range dependency. Variable 'top' used at line 622 is defined at line 618 and has a Short-Range dependency. Variable 'inputs' used at line 622 is defined at line 620 and has a Short-Range dependency. Variable 'bottom' used at line 623 is defined at line 618 and has a Short-Range dependency. Variable 'x' used at line 623 is defined at line 622 and has a Short-Range dependency. Library 'tf' used at line 625 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 625 is defined at line 620 and has a Short-Range dependency. Variable 'x' used at line 625 is defined at line 623 and has a Short-Range dependency. Variable 'model' used at line 626 is defined at line 625 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 9}
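The reason_categories_output field above labels every dependency by the gap between a symbol's definition line and its use line: 'inputs' defined at line 620 and used at line 622 is Variable Short-Range, while 'tf' imported at line 15 and used at line 625 is Library Long-Range. The sketch below re-derives the tally {'Library Long-Range': 2, 'Variable Short-Range': 9} from the (kind, use_line, def_line) triples listed in that annotation; the 10-line cutoff is a hypothetical threshold chosen to fit these examples, not a rule documented by the dataset.

from collections import Counter

# Hypothetical re-derivation of the Short-Range / Long-Range labels seen in
# reason_categories_output; cutoff=10 is an assumption, not the dataset's rule.
def classify_dependency(kind, use_line, def_line, cutoff=10):
    horizon = 'Short-Range' if use_line - def_line <= cutoff else 'Long-Range'
    return f'{kind} {horizon}'

# (kind, use_line, def_line) triples copied from the annotation above.
deps = [
    ('Library', 620, 11), ('Variable', 620, 618), ('Variable', 620, 618),
    ('Variable', 622, 618), ('Variable', 622, 620), ('Variable', 623, 618),
    ('Variable', 623, 622), ('Library', 625, 15), ('Variable', 625, 620),
    ('Variable', 625, 623), ('Variable', 626, 625),
]
print(Counter(classify_dependency(*d) for d in deps))
# Counter({'Variable Short-Range': 9, 'Library Long-Range': 2})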
completion_python
GAN_model
622
626
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ']
[' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'top' used at line 622 is defined at line 618 and has a Short-Range dependency. Variable 'inputs' used at line 622 is defined at line 620 and has a Short-Range dependency. Variable 'bottom' used at line 623 is defined at line 618 and has a Short-Range dependency. Variable 'x' used at line 623 is defined at line 622 and has a Short-Range dependency. Library 'tf' used at line 625 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 625 is defined at line 620 and has a Short-Range dependency. Variable 'x' used at line 625 is defined at line 623 and has a Short-Range dependency. Variable 'model' used at line 626 is defined at line 625 and has a Short-Range dependency.
{}
{'Variable Short-Range': 7, 'Library Long-Range': 1}
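The before/after context repeated in these rows is dominated by the equalized learning-rate layers (EqualizedConv2D, EqualizedConv2DTranspose, EqualizedDense): weights are stored unit-normal and rescaled at call time by sqrt(gain/fan_in), with the bias left unscaled. Below is a minimal NumPy sketch of that runtime scaling for the dense case, assuming gain=2 and fan_in=512 as in the payload code; the batch size and seed are illustrative. Scaling at call time rather than at initialization keeps He-style output variance while the optimizer sees unit-scale parameters.

import numpy as np

# Runtime weight scaling as in the payload's EqualizedDense: weights keep
# std ~ 1 in storage and are multiplied by sqrt(gain / fan_in) on each call.
rng = np.random.default_rng(42)
fan_in, units, gain = 512, 1, 2
w = rng.standard_normal((fan_in, units))   # stored weights, unit variance
b = np.zeros(units)                        # bias is deliberately not scaled
scale = np.sqrt(gain / fan_in)             # ~0.0625 for these dimensions
x = rng.standard_normal((4, fan_in))       # illustrative batch of 4 inputs
y = x @ (scale * w) + b
print(scale, y.std())                      # output std is roughly sqrt(gain)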
completion_python
GAN_model
623
626
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)']
[' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model']
['', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'bottom' used at line 623 is defined at line 618 and has a Short-Range dependency. Variable 'x' used at line 623 is defined at line 622 and has a Short-Range dependency. Library 'tf' used at line 625 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 625 is defined at line 620 and has a Short-Range dependency. Variable 'x' used at line 625 is defined at line 623 and has a Short-Range dependency. Variable 'model' used at line 626 is defined at line 625 and has a Short-Range dependency.
{}
{'Variable Short-Range': 5, 'Library Long-Range': 1}
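The Short-Range / Medium-Range / Long-Range labels in the dependency annotations above appear to be a pure function of the line gap between a name's use and its definition. Below is a minimal sketch of that bucketing, assuming cutoffs of at most 10 lines for Short-Range and at most 30 for Medium-Range; the cutoffs and the helper name classify_range are assumptions inferred from the annotated examples in this section, not a confirmed spec.

# Minimal sketch: bucket a use/definition pair by line distance.
# ASSUMPTION: Short <= 10 lines, Medium 11-30, Long > 30, inferred from
# the labeled examples in the surrounding records.
def classify_range(use_line, def_line, short_max=10, medium_max=30):
    # Distance in source lines between a use and the definition it resolves to.
    distance = use_line - def_line
    if distance <= short_max:
        return 'Short-Range'
    if distance <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

# Spot-checks against annotations in this section:
assert classify_range(623, 618) == 'Short-Range'   # Variable 'bottom', gap 5
assert classify_range(660, 644) == 'Medium-Range'  # Variable 'downsample_func', gap 16
assert classify_range(662, 83) == 'Long-Range'     # Class 'EqualizedConv2D', gap 579
assert classify_range(625, 15) == 'Long-Range'     # Library 'tf', gap 610

Under these assumed cutoffs every labeled pair in this section classifies correctly, which is the only evidence the thresholds rest on.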
completion_python
GAN_model
660
669
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' 
conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:']
[' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)']
[' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Body', 'usage_line': 660}, {'reason_category': 'If Body', 'usage_line': 661}, {'reason_category': 'If Body', 'usage_line': 662}, {'reason_category': 'If Body', 'usage_line': 663}, {'reason_category': 'If Body', 'usage_line': 664}, {'reason_category': 'If Body', 'usage_line': 665}, {'reason_category': 'If Body', 'usage_line': 666}, {'reason_category': 'If Body', 'usage_line': 667}, {'reason_category': 'If Body', 'usage_line': 668}, {'reason_category': 'If Body', 'usage_line': 669}]
Variable 'downsample_func' used at line 660 is defined at line 644 and has a Medium-Range dependency. Variable 'inputs' used at line 660 is defined at line 651 and has a Short-Range dependency. Variable 'act_func' used at line 662 is defined at line 643 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 662 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 662 is defined at line 646 and has a Medium-Range dependency. Variable 'kernel_init' used at line 665 is defined at line 648 and has a Medium-Range dependency. Variable 'h' used at line 665 is defined at line 660 and has a Short-Range dependency. Variable 'alpha' used at line 667 is defined at line 652 and has a Medium-Range dependency. Variable 'h' used at line 667 is defined at line 662 and has a Short-Range dependency. Variable 'x' used at line 667 is defined at line 658 and has a Short-Range dependency. Variable 'bottom' used at line 669 is defined at line 642 and has a Medium-Range dependency. Variable 'fade_in' used at line 669 is defined at line 667 and has a Short-Range dependency.
{'If Body': 10}
{'Variable Medium-Range': 6, 'Variable Short-Range': 5, 'Class Long-Range': 1}
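The span completed in this record fills in the fade-in branch of from_rgb_fadein, whose core is the convex blend (1-alpha)*h + alpha*x between the old downsampled path h and the newly grown path x, as in progressive-growing GANs. The following is a minimal NumPy sketch of just that blending arithmetic, detached from Keras; the constant tensors and alpha values are illustrative stand-ins, not values taken from the code.

import numpy as np

# Stand-ins for the two branches blended during a resolution transition:
# h is the old path (input downsampled, then a 1x1 from-RGB conv),
# x is the new path (full-resolution from-RGB conv plus the new block).
h = np.full((1, 4, 4, 512), 2.0)
x = np.full((1, 4, 4, 512), 6.0)

def fade_in(alpha, h, x):
    # Same convex combination as the completed span: (1 - alpha) * h + alpha * x
    return (1 - alpha) * h + alpha * x

print(fade_in(0.0, h, x)[0, 0, 0, 0])  # 2.0 -- alpha=0 keeps only the old path
print(fade_in(0.5, h, x)[0, 0, 0, 0])  # 4.0 -- halfway through the transition
print(fade_in(1.0, h, x)[0, 0, 0, 0])  # 6.0 -- alpha=1 keeps only the new path

Driving alpha from 0 to 1 during training introduces the new block gradually rather than all at once; this is why the unit test in the record feeds alpha = 1.0 and expects the pure new-path output.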
completion_python
GAN_model
659
672
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' 
conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)']
[' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ']
['', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[{'reason_category': 'If Condition', 'usage_line': 659}, {'reason_category': 'If Body', 'usage_line': 660}, {'reason_category': 'If Body', 'usage_line': 661}, {'reason_category': 'If Body', 'usage_line': 662}, {'reason_category': 'If Body', 'usage_line': 663}, {'reason_category': 'If Body', 'usage_line': 664}, {'reason_category': 'If Body', 'usage_line': 665}, {'reason_category': 'If Body', 'usage_line': 666}, {'reason_category': 'If Body', 'usage_line': 667}, {'reason_category': 'If Body', 'usage_line': 668}, {'reason_category': 'If Body', 'usage_line': 669}]
Variable 'bottom' used at line 659 is defined at line 642 and has a Medium-Range dependency. Variable 'downsample_func' used at line 660 is defined at line 644 and has a Medium-Range dependency. Variable 'inputs' used at line 660 is defined at line 651 and has a Short-Range dependency. Variable 'act_func' used at line 662 is defined at line 643 and has a Medium-Range dependency. Class 'EqualizedConv2D' used at line 662 is defined at line 83 and has a Long-Range dependency. Variable 'filters2' used at line 662 is defined at line 646 and has a Medium-Range dependency. Variable 'kernel_init' used at line 665 is defined at line 648 and has a Medium-Range dependency. Variable 'h' used at line 665 is defined at line 660 and has a Short-Range dependency. Variable 'alpha' used at line 667 is defined at line 652 and has a Medium-Range dependency. Variable 'h' used at line 667 is defined at line 662 and has a Short-Range dependency. Variable 'x' used at line 667 is defined at line 658 and has a Short-Range dependency. Variable 'bottom' used at line 669 is defined at line 642 and has a Medium-Range dependency. Variable 'fade_in' used at line 669 is defined at line 667 and has a Short-Range dependency. Library 'tf' used at line 671 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 671 is defined at line 651 and has a Medium-Range dependency. Variable 'alpha' used at line 671 is defined at line 652 and has a Medium-Range dependency. Variable 'x' used at line 671 is defined at line 669 and has a Short-Range dependency. Variable 'model' used at line 672 is defined at line 671 and has a Short-Range dependency.
{'If Condition': 1, 'If Body': 10}
{'Variable Medium-Range': 9, 'Variable Short-Range': 7, 'Class Long-Range': 1, 'Library Long-Range': 1}
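The horizon_freq_analysis field looks like a tally of the (kind, range) pairs spelled out sentence by sentence in horizon_categories_output. Below is a minimal parsing sketch under that assumption; the regular expression and the helper name freq_analysis are guesses based on the sentence shape of the annotations above, not a known spec. Run over the full annotation text of this record, it reproduces {'Variable Medium-Range': 9, 'Variable Short-Range': 7, 'Class Long-Range': 1, 'Library Long-Range': 1}.

import re
from collections import Counter

# Two example sentences in the shape used by horizon_categories_output:
# "<Kind> '<name>' used at line N is defined/imported at line M and has a <Range> dependency."
ANNOTATION = (
    "Variable 'bottom' used at line 659 is defined at line 642 and has a "
    "Medium-Range dependency. Library 'tf' used at line 671 is imported at "
    "line 15 and has a Long-Range dependency."
)

# ASSUMPTION: every sentence follows the pattern above; the lazy ".*?" skips
# over "is defined at line M and" / "is imported at line M and".
pattern = re.compile(r"(\w+) '[^']+' used at line \d+ .*? has a (\w+-Range) dependency")

def freq_analysis(text):
    # Tally "<Kind> <Range>" keys, e.g. "Variable Medium-Range".
    return dict(Counter(f"{kind} {rng}" for kind, rng in pattern.findall(text)))

print(freq_analysis(ANNOTATION))
# -> {'Variable Medium-Range': 1, 'Library Long-Range': 1}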
completion_python
GAN_model
671
672
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' 
initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = 
U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' 
kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' 
conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ']
[' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ']
['', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 671 is imported at line 15 and has a Long-Range dependency. Variable 'inputs' used at line 671 is defined at line 651 and has a Medium-Range dependency. Variable 'alpha' used at line 671 is defined at line 652 and has a Medium-Range dependency. Variable 'x' used at line 671 is defined at line 669 and has a Short-Range dependency. Variable 'model' used at line 672 is defined at line 671 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 2, 'Variable Short-Range': 2}
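The progressive-GAN builder code in the record above leans on one technique its annotations keep circling: equalized learning rate, i.e. kernels drawn from a plain N(0,1) and rescaled at call time by sqrt(gain / fan_in), with the bias left unscaled. A minimal sketch of just that scaling rule, written in NumPy so it runs without TensorFlow (all names here are illustrative, not from the record):

import numpy as np

def equalized_scale(kernel_h, kernel_w, n_channels, gain=2.0):
    # same constant as the record's EqualizedConv2D.build:
    # sqrt(gain / (kernel_h * kernel_w * n_channels))
    fan_in = kernel_h * kernel_w * n_channels
    return np.sqrt(gain / fan_in)

rng = np.random.default_rng(42)
w = rng.standard_normal((3, 3, 16, 32))    # raw N(0,1) kernel, shape (kh, kw, in, out)
w_scaled = equalized_scale(3, 3, 16) * w   # applied per call; the bias is never scaled
print(float(w_scaled.std()))               # close to sqrt(2 / (3*3*16)) ~= 0.118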
completion_python
simplex_method
4
4
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):']
[' return sum(x*y for x,y in zip(a,b))']
['', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Generator_Expressions', 'usage_line': 4}]
Variable 'x' used at line 4 is part of a Generator_Expressions defined at line 4 and has a Short-Range dependency. Variable 'y' used at line 4 is part of a Generator_Expressions defined at line 4 and has a Short-Range dependency. Variable 'a' used at line 4 is defined at line 3 and has a Short-Range dependency. Variable 'b' used at line 4 is defined at line 3 and has a Short-Range dependency.
{'Generator_Expressions': 1}
{'Variable Generator_Expressions Short-Range': 2, 'Variable Short-Range': 2}
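As a quick check of the generator-expression pattern this record annotates, the dot product can be exercised standalone (input values below are illustrative):

def dot(a, b):
    # sum() drains the generator lazily; no intermediate list is materialized
    return sum(x * y for x, y in zip(a, b))

print(dot([1, 2, 3], [4, 5, 6]))  # 1*4 + 2*5 + 3*6 = 32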
completion_python
simplex_method
8
8
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):']
[' return [row[j] for row in A]']
['', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 8}]
Variable 'row' used at line 8 is part of a List_Comprehension defined at line 8 and has a Short-Range dependency. Variable 'A' used at line 8 is defined at line 7 and has a Short-Range dependency. Variable 'j' used at line 8 is defined at line 7 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 1, 'Variable Short-Range': 2}
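The list comprehension annotated here walks the rows once and picks index j from each, so the result preserves row order; a standalone usage example (matrix values are illustrative):

def column(A, j):
    # j-th entry of every row, in row order
    return [row[j] for row in A]

A = [[1, 2], [3, 4], [5, 6]]
print(column(A, 1))  # [2, 4, 6]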
completion_python
simplex_method
12
12
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):']
[' return [column(A, j) for j in range(len(A[0]))]']
['', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 12}]
Variable 'j' used at line 12 is part of a List_Comprehension defined at line 12 and has a Short-Range dependency. Variable 'A' used at line 12 is defined at line 11 and has a Short-Range dependency. Function 'column' used at line 12 is defined at line 7 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 1, 'Variable Short-Range': 1, 'Function Short-Range': 1}
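transpose composes column() over every column index of the first row, flipping the axes of a rectangular matrix; a small usage example (values illustrative):

def column(A, j):
    return [row[j] for row in A]

def transpose(A):
    # one new row per original column; assumes a non-empty rectangular matrix
    return [column(A, j) for j in range(len(A[0]))]

print(transpose([[1, 2, 3], [4, 5, 6]]))  # [[1, 4], [2, 5], [3, 6]]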
completion_python
simplex_method
16
16
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):']
[' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1']
['', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 16}]
Variable 'c' used at line 16 is part of a List_Comprehension defined at line 16 and has a Short-Range dependency. Variable 'col' used at line 16 is defined at line 15 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 1, 'Variable Short-Range': 1}
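isPivotCol encodes "exactly one nonzero entry, and that entry is 1" as two checks: all but one entry is zero, and the column sums to 1. A few boundary cases, chosen here purely for illustration:

def isPivotCol(col):
    return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1

print(isPivotCol([0, 1, 0]))  # True
print(isPivotCol([0, 2, 0]))  # False: one nonzero entry, but it is not 1
print(isPivotCol([1, 1, 0]))  # False: two nonzero entries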
completion_python
simplex_method
20
21
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):']
[' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]']
['', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 20}]
Variable 'i' used at line 20 is part of a List_Comprehension defined at line 20 and has a Short-Range dependency. Variable 'x' used at line 20 is part of a List_Comprehension defined at line 20 and has a Short-Range dependency. Variable 'column' used at line 20 is defined at line 19 and has a Short-Range dependency. Variable 'tableau' used at line 21 is defined at line 19 and has a Short-Range dependency. Variable 'pivotRow' used at line 21 is defined at line 20 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 2, 'Variable Short-Range': 3}
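variableValueForPivotColumn locates the row holding the pivot column's single 1, then reads that row's right-hand-side entry; a minimal illustration on a hand-made 2x3 tableau:

def variableValueForPivotColumn(tableau, column):
    pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]
    return tableau[pivotRow][-1]

tableau = [[1, 0, 5],
           [0, 1, 7]]
print(variableValueForPivotColumn(tableau, [0, 1]))  # 7: the 1 sits in row 1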
completion_python
simplex_method
25
26
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):']
[' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])']
['', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Generator_Expressions', 'usage_line': 26}]
Variable 'tableau' used at line 25 is defined at line 24 and has a Short-Range dependency. Variable 'x' used at line 26 is part of a Generator_Expressions defined at line 26 and has a Short-Range dependency. Variable 'lastRow' used at line 26 is defined at line 25 and has a Short-Range dependency.
{'Generator_Expressions': 1}
{'Variable Short-Range': 2, 'Variable Generator_Expressions Short-Range': 1}
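canImprove asks whether any reduced cost in the objective row (everything except the final right-hand-side entry) is still positive; two illustrative tableaus:

def canImprove(tableau):
    lastRow = tableau[-1]
    return any(x > 0 for x in lastRow[:-1])

print(canImprove([[1, 0, 4], [2, -1, 0]]))   # True: the 2 still allows improvement
print(canImprove([[1, 0, 4], [-2, -1, 0]]))  # False: no positive reduced cost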
completion_python
simplex_method
30
33
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):']
[' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y']
['', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'If Condition', 'usage_line': 30}, {'reason_category': 'If Body', 'usage_line': 31}, {'reason_category': 'Lambda_Expressions', 'usage_line': 32}]
Variable 'L' used at line 30 is defined at line 29 and has a Short-Range dependency. Library 'heapq' used at line 32 is imported at line 1 and has a Long-Range dependency. Variable 'L' used at line 32 is defined at line 29 and has a Short-Range dependency. Variable 'x' used at line 32 is part of a Lambda_Expressions defined at line 32 and has a Short-Range dependency. Variable 'x' used at line 33 is defined at line 32 and has a Short-Range dependency. Variable 'y' used at line 33 is defined at line 32 and has a Short-Range dependency.
{'If Condition': 1, 'If Body': 1, 'Lambda_Expressions': 1}
{'Variable Short-Range': 4, 'Library Long-Range': 1, 'Variable Lambda_Expressions Short-Range': 1}
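
The record above exercises moreThanOneMin, the helper simplex uses to detect a degenerate pivot (a tied minimum ratio) via heapq.nsmallest and a lambda key. A minimal sketch of that idiom with illustrative quotient values (not taken from any record); it also shows how comparing the whole (index, ratio) tuples, as the quoted completion does, differs from comparing the ratio components alone:

import heapq

# Illustrative (row_index, ratio) pairs in the format simplex builds.
quotients = [(0, 2.0), (1, 2.0), (2, 5.0)]
smallest, second = heapq.nsmallest(2, quotients, key=lambda t: t[1])
print(smallest, second)          # (0, 2.0) (1, 2.0)
print(smallest == second)        # False: whole tuples differ by row index
print(smallest[1] == second[1])  # True: the minimum ratio is tied
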
completion_python
simplex_method
31
31
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:']
[' return False']
[' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'If Body', 'usage_line': 31}]
null
{'If Body': 1}
null
completion_python
simplex_method
47
48
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:']
[' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)']
['', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'If Body', 'usage_line': 47}, {'reason_category': 'If Body', 'usage_line': 48}]
Variable 'newVars' used at line 47 is defined at line 41 and has a Short-Range dependency. Variable 'gtThreshold' used at line 47 is defined at line 39 and has a Short-Range dependency. Variable 'numRows' used at line 48 is defined at line 42 and has a Short-Range dependency. Variable 'gtThreshold' used at line 48 is defined at line 39 and has a Short-Range dependency.
{'If Body': 2}
{'Variable Short-Range': 4}
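
In the completion above, each 'greater than' right-hand side adds one surplus column and one tableau row, so newVars and numRows advance in lockstep. A minimal sketch with illustrative thresholds:

gtThreshold = [10, 20]  # two 'greater than' constraints (illustrative)
newVars = 0
numRows = 0
if gtThreshold != []:
    newVars += len(gtThreshold)  # one new surplus column per constraint
    numRows += len(gtThreshold)  # one tableau row per constraint
print(newVars, numRows)          # 2 2
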
completion_python
simplex_method
51
52
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:']
[' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)']
[' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'If Body', 'usage_line': 51}, {'reason_category': 'If Body', 'usage_line': 52}]
Variable 'newVars' used at line 51 is defined at line 41 and has a Short-Range dependency. Variable 'ltThreshold' used at line 51 is defined at line 39 and has a Medium-Range dependency. Variable 'numRows' used at line 52 is defined at line 42 and has a Short-Range dependency. Variable 'ltThreshold' used at line 52 is defined at line 39 and has a Medium-Range dependency.
{'If Body': 2}
{'Variable Short-Range': 2, 'Variable Medium-Range': 2}
completion_python
simplex_method
56
56
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:']
[' numRows += len(eqThreshold)']
['', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'If Body', 'usage_line': 56}]
Variable 'numRows' used at line 56 is defined at line 42 and has a Medium-Range dependency. Variable 'eqThreshold' used at line 56 is defined at line 39 and has a Medium-Range dependency.
{'If Body': 1}
{'Variable Medium-Range': 2}
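
The completion above handles the equality case: equalities contribute tableau rows but no slack columns, so only numRows moves. A minimal sketch continuing illustrative counts carried over from the inequality cases:

eqThreshold = [5]        # one equality constraint (illustrative)
newVars, numRows = 2, 2  # counts already accumulated from inequalities
if eqThreshold != []:
    numRows += len(eqThreshold)  # a row is added, but no slack column
print(newVars, numRows)          # 2 3
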
completion_python
simplex_method
60
60
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:']
[' cost = [-x for x in cost]']
['', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'If Body', 'usage_line': 60}, {'reason_category': 'List_Comprehension', 'usage_line': 60}]
Variable 'x' used at line 60 is part of a List_Comprehension defined at line 60 and has a Short-Range dependency. Variable 'cost' used at line 60 is defined at line 39 and has a Medium-Range dependency.
{'If Body': 1, 'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 1, 'Variable Medium-Range': 1}
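
The completion above is the usual min-to-max reduction: minimizing <c, x> is the same as maximizing <-c, x>, so the cost vector is negated once up front. A minimal sketch with an illustrative cost vector:

cost = [3, -1, 4]  # illustrative objective coefficients
maximization = False
if not maximization:
    cost = [-x for x in cost]  # min <c, x>  ==  max <-c, x>
print(cost)                    # [-3, 1, -4]
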
completion_python
simplex_method
64
64
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:']
[' return cost, equalities, eqThreshold']
['', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'If Body', 'usage_line': 64}]
Variable 'cost' used at line 64 is defined at line 39 and has a Medium-Range dependency. Variable 'equalities' used at line 64 is defined at line 39 and has a Medium-Range dependency. Variable 'eqThreshold' used at line 64 is defined at line 39 and has a Medium-Range dependency.
{'If Body': 1}
{'Variable Medium-Range': 3}
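
The early return completed above covers problems with no inequalities: no slack variables are needed, so the inputs already form a standard-form program and are handed back unchanged. A minimal sketch, assuming the quoted standardForm definition is in scope:

# Only an equality constraint x1 + x2 = 4, so newVars stays 0.
c, A, b = standardForm([1, 2], equalities=[[1, 1]], eqThreshold=[4])
print(c, A, b)  # [1, 2] [[1, 1]] [4]
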
completion_python
simplex_method
78
82
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables']
[' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)']
['', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 78}, {'reason_category': 'List_Comprehension', 'usage_line': 78}, {'reason_category': 'Loop Body', 'usage_line': 79}, {'reason_category': 'Loop Body', 'usage_line': 80}, {'reason_category': 'Loop Body', 'usage_line': 81}, {'reason_category': 'Loop Body', 'usage_line': 82}]
Variable 'constraints' used at line 78 is defined at line 68 and has a Short-Range dependency. Variable 'c' used at line 78 is part of a List_Comprehension defined at line 78 and has a Short-Range dependency. Variable 'r' used at line 78 is part of a List_Comprehension defined at line 78 and has a Short-Range dependency. Variable 'constraintList' used at line 78 is part of a Loop defined at line 76 and has a Short-Range dependency. Function 'identity' used at line 78 is defined at line 36 and has a Long-Range dependency. Variable 'numRows' used at line 78 is defined at line 42 and has a Long-Range dependency. Variable 'newVars' used at line 78 is defined at line 41 and has a Long-Range dependency. Variable 'coefficient' used at line 78 is part of a Loop defined at line 76 and has a Short-Range dependency. Variable 'offset' used at line 78 is defined at line 73 and has a Short-Range dependency. Variable 'threshold' used at line 80 is defined at line 69 and has a Medium-Range dependency. Variable 'oldThreshold' used at line 80 is part of a Loop defined at line 76 and has a Short-Range dependency. Variable 'offset' used at line 82 is defined at line 73 and has a Short-Range dependency. Variable 'oldThreshold' used at line 82 is part of a Loop defined at line 76 and has a Short-Range dependency.
{'Loop Body': 5, 'List_Comprehension': 1}
{'Variable Short-Range': 3, 'Variable List_Comprehension Short-Range': 2, 'Variable Loop Short-Range': 4, 'Function Long-Range': 1, 'Variable Long-Range': 2, 'Variable Medium-Range': 1}
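
The loop body completed above splices a signed identity block onto each constraint row: -1 columns for the surplus variables of 'greater than' rows, +1 for the slacks of 'less than' rows, with the running offset steering each block into fresh columns. A worked sketch, assuming the quoted identity and standardForm definitions are in scope (the constraint data is illustrative):

c, A, b = standardForm([1, 1],
                       greaterThans=[[1, 0]], gtThreshold=[2],
                       lessThans=[[0, 1]], ltThreshold=[3])
print(c)  # [1, 1, 0, 0]: two slack/surplus columns appended to the cost
print(A)  # [[1, 0, -1, 0], [0, 1, 0, 1]]
print(b)  # [2, 3]
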
completion_python
simplex_method
134
136
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):']
[' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]']
[' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 134}, {'reason_category': 'If Condition', 'usage_line': 134}, {'reason_category': 'Loop Body', 'usage_line': 135}, {'reason_category': 'List_Comprehension', 'usage_line': 135}, {'reason_category': 'If Body', 'usage_line': 135}, {'reason_category': 'Loop Body', 'usage_line': 136}, {'reason_category': 'List_Comprehension', 'usage_line': 136}, {'reason_category': 'If Body', 'usage_line': 136}]
Variable 'k' used at line 134 is part of a Loop defined at line 133 and has a Short-Range dependency. Variable 'i' used at line 134 is defined at line 126 and has a Short-Range dependency. Variable 'y' used at line 135 is part of a List_Comprehension defined at line 135 and has a Short-Range dependency. Variable 'tableau' used at line 135 is defined at line 130 and has a Short-Range dependency. Variable 'i' used at line 135 is defined at line 126 and has a Short-Range dependency. Variable 'k' used at line 135 is part of a Loop defined at line 133 and has a Short-Range dependency. Variable 'j' used at line 135 is defined at line 126 and has a Short-Range dependency. Variable 'tableau' used at line 136 is defined at line 130 and has a Short-Range dependency. Variable 'k' used at line 136 is part of a Loop defined at line 133 and has a Short-Range dependency. Variable 'x' used at line 136 is part of a List_Comprehension defined at line 136 and has a Short-Range dependency. Variable 'y' used at line 136 is part of a List_Comprehension defined at line 136 and has a Short-Range dependency. Variable 'pivotRowMultiple' used at line 136 is defined at line 135 and has a Short-Range dependency.
{'Loop Body': 3, 'If Condition': 1, 'List_Comprehension': 2, 'If Body': 2}
{'Variable Loop Short-Range': 3, 'Variable Short-Range': 6, 'Variable List_Comprehension Short-Range': 3}
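
The completion above is the elimination half of a pivot: with the pivot row already scaled to carry a 1 in the pivot column, every other row subtracts the right multiple of it so the pivot column becomes a unit vector. A standalone sketch on a tiny illustrative tableau:

tableau = [[1.0, 2.0, 4.0],  # normalized pivot row (pivot at i=0, j=0)
           [3.0, 1.0, 5.0]]
i, j = 0, 0
for k, row in enumerate(tableau):
    if k != i:
        pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]
        tableau[k] = [x - y for x, y in zip(tableau[k], pivotRowMultiple)]
print(tableau)  # [[1.0, 2.0, 4.0], [0.0, -5.0, -7.0]]
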
completion_python
simplex_method
135
136
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:']
[' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]']
[' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 135}, {'reason_category': 'List_Comprehension', 'usage_line': 135}, {'reason_category': 'If Body', 'usage_line': 135}, {'reason_category': 'Loop Body', 'usage_line': 136}, {'reason_category': 'List_Comprehension', 'usage_line': 136}, {'reason_category': 'If Body', 'usage_line': 136}]
Variable 'y' used at line 135 is part of a List_Comprehension defined at line 135 and has a Short-Range dependency. Variable 'tableau' used at line 135 is defined at line 130 and has a Short-Range dependency. Variable 'i' used at line 135 is defined at line 126 and has a Short-Range dependency. Variable 'k' used at line 135 is part of a Loop defined at line 133 and has a Short-Range dependency. Variable 'j' used at line 135 is defined at line 126 and has a Short-Range dependency. Variable 'tableau' used at line 136 is defined at line 130 and has a Short-Range dependency. Variable 'k' used at line 136 is part of a Loop defined at line 133 and has a Short-Range dependency. Variable 'x' used at line 136 is part of a List_Comprehension defined at line 136 and has a Short-Range dependency. Variable 'y' used at line 136 is part of a List_Comprehension defined at line 136 and has a Short-Range dependency. Variable 'pivotRowMultiple' used at line 136 is defined at line 135 and has a Short-Range dependency.
{'Loop Body': 2, 'List_Comprehension': 2, 'If Body': 2}
{'Variable List_Comprehension Short-Range': 3, 'Variable Short-Range': 5, 'Variable Loop Short-Range': 2}
completion_python
simplex_method
149
154
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', '  return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', '  return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', '  return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', '  return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', '  pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', '  return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', '  lastRow = tableau[-1]', '  return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', '  if len(L) <= 1:', '    return False', '  x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', '  return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', '  return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', '  # Initialize variables for the count of new variables and the number of rows in the tableau', '  newVars = 0', '  numRows = 0', '', '  # Adjust the number of variables and rows based on the constraints', "  # Add slack variables for 'greater than' inequalities", '  if gtThreshold != []:', '    newVars += len(gtThreshold)', '    numRows += len(gtThreshold)', '', '  if ltThreshold != []:', '    newVars += len(ltThreshold)', '    numRows += len(ltThreshold)', '  ', "  # Equalities don't need slack variables but add to the number of rows", '  if eqThreshold != []:', '    numRows += len(eqThreshold)', '', '  # If the problem is a minimization, convert it to a maximization by negating the cost vector ', '  if not maximization:', '    cost = [-x for x in cost]', '', '  # If no new variables are needed, the problem is already in standard form', '  if newVars == 0:', '    return cost, equalities, eqThreshold', '', '  # Extend the cost function with zeros for the new slack variables', '  newCost = list(cost) + [0] * newVars', '  constraints = []', '  threshold = []', '', "  # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", '  oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', '  offset = 0', '', '  # Process each set of constraints', '  for constraintList, oldThreshold, coefficient in oldConstraints:', '    # Append the identity matrix multiplied by the coefficient for slack variables', '    constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', '    # Append the thresholds for each constraint', '    threshold += oldThreshold', '    # Increase the offset for the identity matrix used for the next set of constraints', '    offset += len(oldThreshold)', '', '  return newCost, constraints, threshold', '', "'''", '  simplex: [float], [[float]], [float] -> [float], float', '  Solve the given standard-form linear program:', '    max <c,x>', '    s.t. Ax = b', '         x >= 0', '  providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', '  # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', '  tableau = [row[:] + [x] for row, x in zip(A, b)]', '  tableau.append([ci for ci in c] + [0])', '  print("Initial tableau:")', '  for row in tableau:', '    print(row)', '  print()', '', '  # Iterate until no improvements can be made', '  while canImprove(tableau):', '    # Choose entering variable (minimum positive index of the last row)', '    column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', '    column = min(column_choices, key=lambda a: a[1])[0]', '', '    # Check for unboundedness', '    if all(row[column] <= 0 for row in tableau):', "      raise Exception('Linear program is unbounded.')", '', '    # Check for degeneracy: more than one minimizer of the quotient', '    quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', '    if moreThanOneMin(quotients):', "      raise Exception('Linear program is degenerate.')", '', '    # Chosing leaving variable (row index minimizing the quotient)', '    row = min(quotients, key=lambda x: x[1])[0]', '', '    #pivots on the chosen row and column', '    pivot = row, column', '', '    print("Next pivot index is=%d,%d \\n" % pivot)', '    i,j = pivot', '    pivotDenom = tableau[i][j]', '', '    # Normalize the pivot row', '    tableau[i] = [x / pivotDenom for x in tableau[i]]', '', '    # Zero out the other entries in the pivot column', '    for k,row in enumerate(tableau):', '      if k != i:', '        pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', '        tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', '    print("Tableau after pivot:")', '    for row in tableau:', '      print(row)', '    print()', '  ', '  # Transpose the tableau to make it easier to work with columns', '  columns = transpose(tableau)', '', '  # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', '  indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', '  # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.']
[' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[]
[{'reason_category': 'List_Comprehension', 'usage_line': 149}]
Variable 'colIndex' used at line 149 is part of a List_Comprehension defined at line 149 and has a Short-Range dependency. Variable 'indices' used at line 149 is defined at line 146 and has a Short-Range dependency. Function 'variableValueForPivotColumn' used at line 149 is defined at line 19 and has a Long-Range dependency. Variable 'tableau' used at line 149 is defined at line 97 and has a Long-Range dependency. Variable 'columns' used at line 149 is defined at line 143 and has a Short-Range dependency. Variable 'tableau' used at line 152 is defined at line 97 and has a Long-Range dependency. Variable 'tableau' used at line 154 is defined at line 97 and has a Long-Range dependency. Variable 'primal_solution' used at line 154 is defined at line 149 and has a Short-Range dependency. Variable 'objective_value' used at line 154 is defined at line 152 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 1, 'Variable Short-Range': 4, 'Function Long-Range': 1, 'Variable Long-Range': 3}
completion_python
RL_Motion_Planning
83
84
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals']
[' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals']
[' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # --------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. (db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Lambda_Expressions', 'usage_line': 83}]
Library 'tf' used at line 83 is imported at line 20 and has a Long-Range dependency. Variable 'x' used at line 83 is part of a Lambda_Expressions defined at line 83 and has a Short-Range dependency. Variable 'num_objs' used at line 83 is defined at line 74 and has a Short-Range dependency. Variable 'states' used at line 83 is defined at line 81 and has a Short-Range dependency. Variable 'goals' used at line 84 is defined at line 83 and has a Short-Range dependency.
{'Lambda_Expressions': 1}
{'Library Long-Range': 1, 'Variable Lambda_Expressions Short-Range': 1, 'Variable Short-Range': 3}
completion_python
RL_Motion_Planning
81
86
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow']
[' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal']
['', '', 'def repurpose_skill_seq(args, skill_seq):', '    """', '    Repurpose the skill sequence to be used for training the policy. Use value of wrap_skill_id', '    = "0": no change', '    = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', '    = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', '    :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', '    :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', '    """', "    if args.env_name != 'OpenAIPickandPlace':", '        tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', '        sys.exit(-1)', '    ', '    if args.wrap_level == "0":', '        return skill_seq', '    elif args.wrap_level == "1":', '        # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', '        skill_seq = tf.argmax(skill_seq, axis=-1)', '        skill_seq = skill_seq % 3', '        # Convert back to one-hot', '        skill_seq = tf.one_hot(skill_seq, depth=3)', '        return skill_seq', '    elif args.wrap_level == "2":', '        # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', '        skill_seq = tf.argmax(skill_seq, axis=-1)', '        skill_seq = skill_seq // 3', '        # Convert back to one-hot', '        skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', '        return skill_seq', '    else:', '        raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', '    """Orthogonal regularization v2.', '        See equation (3) in https://arxiv.org/abs/1809.11096.', '        Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', '    Args:', '        model: A keras model to apply regularization for.', "        reg_coef: Orthogonal regularization coefficient. Don't change this value.", '    Returns:', '        A regularization loss term.', '    """', '    reg = 0', '    for layer in model.layers:', '        if isinstance(layer, tf.keras.layers.Dense):', '            prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', '            reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', '    ', '    print("Orthogonal Regularization: {}".format(reg * reg_coef))', '    return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', '    def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', '        """', '        Sample random transitions without HER.', '        Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', '        """', '        ', '        batch_size = batch_size_in_transitions  # Number of transitions to sample', "        T = episodic_data['actions'].shape[1]", "        successes = episodic_data['successes']", '        ', '        # Get index at which episode terminated', '        terminate_idxes = tf.math.argmax(successes, axis=-1)', '        # If no success, set to last index', '        mask_no_success = tf.math.equal(terminate_idxes, 0)', '        terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', '                                       tf.cast(mask_no_success, terminate_idxes.dtype))', '        ', "        # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", '        p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', '        episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', '        episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', '        # Get terminate index for the selected episodes', '        terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', '        print("terminate_idxes: ", terminate_idxes)', '        ', '        # ------------------------------------------------------------------------------------------------------------', '        # --------------------------------- 2) Select which time steps + goals to use --------------------------------', '        # Get the current time step', '        t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', '        t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', '        t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', '        ', '        # Get random init time step (before t_samples)', '        rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', '        t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', '        t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', '        print("t_samples_init: ", t_samples_init)', '        ', '        # Get the future time step', '        rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', '        future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', '        future_offset = tf.cast(future_offset, terminate_idxes.dtype)', '        t_samples_future = t_samples + future_offset', '        print("t_samples_future: ", t_samples_future)', '        ', '        # ------------------------------------------------------------------------------------------------------------', '        # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', '        curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', '        transitions = {}', '        for key in episodic_data.keys():', '            transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', '        ', "        transitions['achieved_goals'] = state_to_goal(", "            states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", '            obj_identifiers=None)', '        ', '        # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', '        future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', "        transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", '                                                 obj_identifiers=None)  # Object ids are not used for unsegmented HER', '        ', '        # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', '        init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', "        transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", '        print("transitions: ", transitions)', '        return transitions', '    ', "    if sample_style == 'random_unsegmented':", '        return sample_random_transitions', '    else:', '        raise NotImplementedError', '', '', 'class ReplayBufferTf:', '    def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', '        """Creates a replay buffer.', '', '        Args:', '            buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', '                buffer', '            size_in_transitions (int): the size of the buffer, measured in transitions', '            T (int): the time horizon for episodes', '            transition_fn (function): a function that samples from the replay buffer', '        """', '        self.T = tf.constant(T, dtype=tf.int32)', '        self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', '        ', '        self.current_size = tf.Variable(0, dtype=tf.int32)  # Size of buffer in terms of no. of episodes', '        self.n_transitions_stored = tf.Variable(0, dtype=tf.int32)  # Size of buffer in terms of no. of transitions', '        ', '        self.transition_fn = transition_fn', '        self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', '        tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', '        self.table = Table(tensor_spec, capacity=self.buffer_size)', '    ', '    @tf.function  # Make sure batch_size passed here is a tf.constant to avoid retracing', '    def sample_transitions(self, batch_size):', '        ', '        buffered_data = {}', '        _data = self.table.read(rows=tf.range(self.current_size))', '        for index, key in enumerate(self.buffer_keys):', '            buffered_data[key] = _data[index]', '        ', '        transitions = self.transition_fn(buffered_data, batch_size)', '        print("transitions: ", transitions)', '        return transitions', '    ', '    @tf.function', '    def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', '        ', '        if ep_start is None or ep_end is None:', '            if num_episodes:', '                num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', '            else:', '                num_episodes = self.current_size', '            ep_range = tf.range(num_episodes)', '        else:', '            ep_range = tf.range(ep_start, ep_end)', '        ', '        buffered_data = {}', '        _data = self.table.read(rows=ep_range)', '        for index, key in enumerate(self.buffer_keys):', '            buffered_data[key] = _data[index]', '        print("buffered_data: ", buffered_data)', '        return buffered_data', '    ', '    @tf.function', '    def store_episode(self, episode_batch):', '        """', '        Store each episode into replay buffer', '        episode_batch: {"": array(1 x (T or T+1) x dim)}', '        """', '        idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', '        values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', '        self.table.write(rows=idxs, values=values)', '        self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', '    ', '    def store_episodes(self, episodes_batch):', "        for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", '            episode_batch = {}', '            for key in self.buffer_keys:', '                episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', '            self.store_episode(episode_batch)', '    ', '    def _get_storage_idxs(self, num_to_ins=None):', '        if num_to_ins is None:', '            num_to_ins = tf.cast(1, dtype=tf.int32)', '        ', '        # consecutively insert until you hit the end of the buffer, and then insert randomly.', '        if self.current_size + num_to_ins <= self.buffer_size:', '            idxs = tf.range(self.current_size, self.current_size + num_to_ins)', '        elif self.current_size < self.buffer_size:', '            overflow = num_to_ins - (self.buffer_size - self.current_size)', '            idx_a = tf.range(self.current_size, self.buffer_size)', '            idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', '            idxs = tf.concat([idx_a, idx_b], axis=0)', '        else:', '            idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', '        ', '        # update buffer size', '        self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', '        print("idxs: ", idxs)', '        return idxs', '    ', '    def get_current_size_ep(self):', '        return self.current_size', '    ', '    def get_current_size_trans(self):', '        return self.current_size * self.T', '    ', '    def clear_buffer(self):', '        self.current_size.assign(0)', '    ', '    @property', '    def full(self):', '        return self.current_size == self.buffer_size', '    ', '    def __len__(self):', '        return self.current_size', '    ', '    def save_buffer_data(self, path):', '        buffered_data = {}', '        _data = self.table.read(rows=tf.range(self.current_size))', '        for index, key in enumerate(self.buffer_keys):', '            buffered_data[key] = _data[index]', '        ', "        with open(path, 'wb') as handle:", '            pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', '    ', '    def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', '        ', '        if buffered_data is None:', '            raise ValueError("No buffered_data provided")', '        ', '        if clear_buffer:', '            self.clear_buffer()', '        ', '        if num_demos_to_load is not None:', '            ', '            # Randomly sample idxs to load', "            idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", '            ', '            for key in buffered_data.keys():', '                buffered_data[key] = tf.gather(buffered_data[key], idxs)', '        ', '        # Check if all tensors are present in loaded data', '        data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', '        assert np.all(np.array(data_sizes) == data_sizes[0])', '        ', '        idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', '        values = [buffered_data[key] for key in self.buffer_keys]', '        ', '        self.table.write(rows=idxs, values=values)', '        self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', '    def __init__(self, action_dim):', '        super(Actor, self).__init__()', '        ', '        # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', '        self.base = tf.keras.Sequential([', "            Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", "            Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", "            Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", "            Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", '        ])', '        ', '        self.MEAN_MIN, self.MEAN_MAX = -7, 7', '        self.eps = np.finfo(np.float32).eps', '        self.pi = tf.constant(np.pi)', '        self.FIXED_STD = 0.05', '        ', '        self.train = True', '    ', '    def get_log_prob(self, states, actions):', '        """Evaluate log probs for actions conditioned on states.', '        Args:', '            states: A batch of states.', '            actions: A batch of actions to evaluate log probs on.', '        Returns:', '            Log probabilities of actions.', '        """', '        mu = self.base(states)', '        mu = tf.nn.tanh(mu)', '        mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', '        ', '        std = tf.ones_like(mu) * self.FIXED_STD', '        ', '        actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', '        ', '        # Get log probs from Gaussian distribution', '        log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', '        log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', '        print("log_probs: ", log_probs)', '        return log_probs', '    ', '    def call(self, states, training=None, mask=None):', '        """Computes actions for given inputs.', '        Args:', '            states: A batch of states.', '            training: Ignored', '            mask: Ignored.', '        Returns:', '            A mode action, a sampled action and log probability of the sampled action.', '        """', '        mu = self.base(states)', '        mu = tf.nn.tanh(mu)', '        mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', '        ', '        if self.train:', '            # Sample actions from the distribution', '            actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', '        else:', '            actions = mu', '        ', '        # Compute log probs', '        log_probs = self.get_log_prob(states, actions)', '        log_probs = tf.expand_dims(log_probs, -1)  # To avoid broadcasting', '        ', '        actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', '        print("mu: ", mu)', '        print("actions: ", actions)', '        print("log_probs: ", log_probs)', '        return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', '    def __init__(self, args: Namespace):', '        super(BC, self).__init__()', '        self.args = args', '        ', '        # Declare Policy Network and Optimiser', '        self.actor = Actor(args.a_dim)', '        self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', '        ', '        # Build Model', '        self.build_model()', '        ', '        # For HER', '        self.use_her = False', "        logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", '    ', '    @tf.function(experimental_relax_shapes=True)', '    def train(self, data_exp, data_rb):', '        with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', '            tape.watch(self.actor.variables)', '            ', "            actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", "            pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", '            pi_loss = tf.reduce_mean(pi_loss)', '            penalty = orthogonal_regularization(self.actor.base)', '            pi_loss_w_penalty = pi_loss + penalty', '        ', '        grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', '        self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', '        print("loss/pi: ", pi_loss)', '        print("penalty/pi_ortho_penalty: ", penalty)', '        return {', "            'loss/pi': pi_loss,", "            'penalty/pi_ortho_penalty': penalty,", '        }', '    ', '    def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', '        state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', '        env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', '        prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', '        ', '        # ###################################### Current Goal ####################################### #', '        curr_goal = env_goal', '        ', '        # ###################################### Current Skill ###################################### #', '        curr_skill = prev_skill  # Not used in this implementation', '        ', '        # ########################################## Action ######################################### #', '        # Explore', '        if tf.random.uniform(()) < epsilon:', '            action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', '        # Exploit', '        else:', '            action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1))  # a_t = mu(s_t, g_t)', '            action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', '            action = action_mu + action_dev  # Add noise to action', '            action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', '        ', '        # Safety check for action, should not be nan or inf', '        has_nan = tf.math.reduce_any(tf.math.is_nan(action))', '        has_inf = tf.math.reduce_any(tf.math.is_inf(action))', '        if has_nan or has_inf:', "            logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", '            action = tf.zeros_like(action)', '        ', '        return curr_goal, curr_skill, action', '    ', '    def get_init_skill(self):', '        """', '        demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', '        """', '        skill = tf.zeros((1, self.args.c_dim))', '        return skill', '    ', '    @staticmethod', '    def get_init_goal(init_state, g_env):', '        return g_env', '    ', '    def build_model(self):', '        # a_t <- f(s_t) for each skill', '        _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', '    ', '    def save_(self, dir_param):', '        self.actor.save_weights(dir_param + "/policy.h5")', '    ', '    def load_(self, dir_param):', '        self.actor.load_weights(dir_param + "/policy.h5")', '    ', '    def change_training_mode(self, training_mode: bool):', '        pass', '    ', '    def update_target_networks(self):', '        pass', '', '', 'class AgentBase(object):', '    def __init__(', '            self,', '            args,', '            model,', '            algo: str,', '            expert_buffer: ReplayBufferTf,', '            offline_buffer: ReplayBufferTf', '    ):', '        ', '        self.args = args', '        self.model = model', '        ', '        # Define the Buffers', '        self.expert_buffer = expert_buffer', '        self.offline_buffer = offline_buffer', '        ', '        self.offline_gt_prev_skill = None', '        self.offline_gt_curr_skill = None', '        ', '        # Define Tensorboard for logging Losses and Other Metrics', '        if not os.path.exists(args.dir_summary):', '            os.makedirs(args.dir_summary)', '        ', '        if not os.path.exists(args.dir_plot):', '            os.makedirs(args.dir_plot)', '        self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', '        ', '        # Define wandb logging', '        if self.args.log_wandb:', '            self.wandb_logger = wandb.init(', '                project=args.wandb_project,', '                config=vars(args),', "                id='{}_{}'.format(algo, current_time),", '                reinit=True,  # Allow multiple wandb.init() calls in the same process.', '            )', '            # Clear tensorflow graph and cache', '            tf.keras.backend.clear_session()', '            tf.compat.v1.reset_default_graph()', '    ', '    def preprocess_in_state_space(self, item):', '        item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', '        return item', '    ', '    def save_model(self, dir_param):', '        if not os.path.exists(dir_param):', '            os.makedirs(dir_param)', '        self.model.save_(dir_param)', '    ', '    def load_model(self, dir_param):', '        self.model.load_(dir_param)', '    ', '    def process_data(self, transitions, expert=False, is_supervised=False):', '        ', '        trans = transitions.copy()', '        ', '        # Process the states and goals', "        trans['states'] = self.preprocess_in_state_space(trans['states'])", "        trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", "        trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", "        trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", "        trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", "        trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", '        ', '        if self.model.use_her:', "            trans['goals'] = trans['her_goals']", '        else:', "            trans['goals'] = trans['env_goals']", '        ', '        # Define if the transitions are from expert or not/are supervised or not', "        trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", "        trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", '        ', '        # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', "        trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", "                                                         tf.argmax(trans['curr_skills'], axis=-1)),", '                                            dtype=tf.int32)', '        # reshape the terminate_skills to be of shape (batch_size, 1)', "        trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", '        ', '        # Make sure the data is of type tf.float32', '        for key in trans.keys():', '            trans[key] = tf.cast(trans[key], dtype=tf.float32)', '        print("trans :", trans)', '        return trans', '    ', '    def sample_data(self, buffer, batch_size):', '        ', '        # Sample Transitions', '        transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', '        ', '        # Process the transitions', '        keys = None', '        if all(isinstance(v, dict) for v in transitions.values()):', '            for skill in transitions.keys():', '                ', '                # For skills whose transition data is not None', '                if transitions[skill] is not None:', '                    transitions[skill] = self.process_data(', '                        transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', '                    )', '                    ', '                    keys = transitions[skill].keys()', '            ', '            # If keys is None, No transitions were sampled', '            if keys is None:', '                raise ValueError("No transitions were sampled")', '            ', '            # Concatenate the transitions from different skills', '            combined_transitions = {key: [] for key in keys}', '            ', '            for skill in transitions.keys():', '                ', '                if transitions[skill] is not None:', '                    for key in keys:', '                        combined_transitions[key].append(transitions[skill][key])', '            ', '            for key in keys:', '                combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', '            ', '            transitions = combined_transitions', '        ', '        elif isinstance(transitions, dict):', '            transitions = self.process_data(', '                transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', '            )', '        ', '        else:', '            raise ValueError("Invalid type of transitions")', '        print("transitions: ", transitions)', '        return transitions', '    ', '    @tf.function', '    def train(self):', '        ', '        self.model.change_training_mode(training_mode=True)', '        ', '        data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', '        data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', '        loss_dict = self.model.train(data_expert, data_policy)', '        ', '        # Average the losses', '        avg_loss_dict = {}', '        for key in loss_dict.keys():', '            if key not in avg_loss_dict.keys():', '                avg_loss_dict[key] = []', '            avg_loss_dict[key].append(loss_dict[key])', '        for key in avg_loss_dict.keys():', '            avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', '        print("avg_loss_dict: ", avg_loss_dict)', '        return avg_loss_dict', '    ', '    def learn(self):', '        # This is a base class method, inherited classes must implement this method', '        raise NotImplementedError', '', '', 'class Agent(AgentBase):', '    def __init__(self, args,', '                 expert_buffer: ReplayBufferTf = None,', '                 offline_buffer: ReplayBufferTf = None):', '        ', "        super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", '    ', '    def load_actor(self, dir_param):', '        self.model.actor.load_weights(dir_param + "/policy.h5")', '    ', '    def learn(self):', '        args = self.args', '        ', '        # Tracker for wandb logging', '        log_step = 0', '        ', '        # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', '        data_exp = self.expert_buffer.sample_episodes()', '        data_off = self.offline_buffer.sample_episodes()', '        self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', '        self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', '        self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', '        ', '        with tqdm(total=args.max_time_steps, leave=False) as pbar:', '            for curr_t in range(0, args.max_time_steps):', '                ', '                # Update the reference actors and directors using polyak averaging', '                if curr_t % args.update_target_interval == 0:', '                    tf.print("Updating the target actors and critics at train step {}".format(curr_t))', '                    self.model.update_target_networks()', '                ', '                # Train the policy', "                pbar.set_description('Training')", '                avg_loss_dict = self.train()', '                for key in avg_loss_dict.keys():', '                    avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', '                ', '                # Log', '                if self.args.log_wandb:', '                    self.wandb_logger.log(avg_loss_dict, step=log_step)', '                    self.wandb_logger.log({', "                        'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", "                        'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", '                    }, step=log_step)', '                ', '                # Update', '                pbar.update(1)', '                log_step += 1', '        ', '        # Save the model', '        self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', '    """', '    :param args: Namespace object', '    :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', '    """', '    ', '    args.g_dim = 3', '    args.s_dim = 10', '    args.a_dim = 4', '    ', "    # Specify the expert's latent skill dimension [Default]", "    # Define number of skills, this could be different from agent's practiced skill dimension", "    assert hasattr(args, 'num_objs')", '    args.c_dim = 3 * args.num_objs', '    ', '    if ag_in_env_goal:', '        args.ag_dim = args.g_dim  # Achieved Goal in the same space as Env Goal', '    else:', '        args.ag_dim = 3  # Goal/Object position in the 3D space', '    print("args: ", args)', '    return args', '', '', 'def get_config(db=False):', '    # Construct the absolute path of the data directory', "    data_dir = os.path.join(script_dir, 'pnp_data')", '', '    parser = argparse.ArgumentParser()', '    ', "    parser.add_argument('--log_wandb', type=bool, default=False)", "    parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", "                        choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", '    ', "    parser.add_argument('--expert_demos', type=int, default=25)", "    parser.add_argument('--offline_demos', type=int, default=75)", "    parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", "                        help='Use 10 (num of demos to evaluate trained pol)')", "    parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", "    parser.add_argument('--perc_train', type=int, default=1.0)", '    ', '    # Specify Environment Configuration', "    parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", "    parser.add_argument('--num_objs', type=int, default=1)", "    parser.add_argument('--horizon', type=int, default=100,", "                        help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", "    parser.add_argument('--stacking', type=bool, default=False)", "    parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", "                        help='Expert behaviour in two_object env')", "    parser.add_argument('--full_space_as_goal', type=bool, default=False)", "    parser.add_argument('--fix_goal', type=bool, default=False,", "                        help='[Debugging] Fix the goal position for one object task')", "    parser.add_argument('--fix_object', type=bool, default=False,", "                        help='[Debugging] Fix the object position for one object task')", '    ', '    # Specify Data Collection Configuration', "    parser.add_argument('--buffer_size', type=int, default=int(2e5),", "                        help='Number of transitions to store in buffer (max_time_steps)')", '    ', '    # Specify Training configuration', "    parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", "                        help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", "    parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", "                        help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", "    parser.add_argument('--batch_size', type=int, default=1,", "                        help='No. of trans to sample from buffer for each update')", "    parser.add_argument('--trans_style', type=str, default='random_unsegmented',", "                        choices=['random_unsegmented', 'random_segmented'],", "                        help='How to sample transitions from expert buffer')", '    ', '    # Viterbi configuration', "    parser.add_argument('--skill_supervision', type=str, default='none',", "                        choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", "                        help='Type of supervision for latent skills. '", "                             'full: Use ground truth skills for offline data.'", "                             'semi:x: Use Viterbi to update latent skills for offline data.'", "                             'none: Use Viterbi to update latent skills for expert and offline data.')", "    parser.add_argument('--num_skills', type=int, default=None,", "                        help='Number of skills to use for agent, if provided, will override expert skill set. '", '                             \'Use when skill supervision is "none"\')', "    parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", "                        help='consumed by multi-object expert to determine how to wrap effective skills of expert')", '    ', '    # Polyak', "    parser.add_argument('--update_target_interval', type=int, default=20,", "                        help='Number of time steps after which target networks will be updated using polyak averaging')", "    parser.add_argument('--actor_polyak', type=float, default=0.95,", "                        help='Polyak averaging coefficient for actor.')", "    parser.add_argument('--director_polyak', type=float, default=0.95,", "                        help='Polyak averaging coefficient for director.')", "    parser.add_argument('--critic_polyak', type=float, default=0.95,", "                        help='Polyak averaging coefficient for critic.')", '    ', '    # Evaluation', "    parser.add_argument('--eval_interval', type=int, default=100)", "    parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", '    ', '    # Parameters', "    parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", "    parser.add_argument('--replay_regularization', type=float, default=0.05,", "                        help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", "    parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", "                        help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", "    parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", "                        help='Cost Net Gradient Penalty Coefficient')", "    parser.add_argument('--actor_lr', type=float, default=3e-3)", "    parser.add_argument('--critic_lr', type=float, default=3e-4)", "    parser.add_argument('--disc_lr', type=float, default=3e-4)", "    parser.add_argument('--clip_obs', type=float, default=200.0,", "                        help='Un-normalised i.e. raw Observed Values (State and Goals) are clipped to this value')", '    ', '    # Specify Path Configurations', "    parser.add_argument('--dir_data', type=str, default=data_dir)", "    parser.add_argument('--dir_root_log', type=str, default=log_dir)", "    parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", "    parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", "    parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", "    parser.add_argument('--dir_post', type=str, default='./finetuned_models',", "                        help='Provide the <path_to_models>')", "    parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", "                        help='Provide the <path_to_models>')", '    ', '    args = parser.parse_args()', '    ', '    # Load the environment config', '    args = get_config_env(args, ag_in_env_goal=True)', '    ', '    # Other Configurations', '    args.train_demos = int(args.expert_demos * args.perc_train)', '    args.val_demos = args.expert_demos - args.train_demos', '    ', '    # Set number of skills [For unsupervised skill learning]', "    if args.num_skills is not None and args.skill_supervision == 'none':", "        print('Overriding c_dim with specified %d skills' % args.num_skills)", '        args.c_dim = args.num_skills', '    ', '    # Set number of skills [For full or semi-supervised skill learning]', "    if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", "        print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", "        if args.wrap_level == '1':", '            args.c_dim = 3', "        elif args.wrap_level == '2':", '            args.c_dim = args.num_objs', '        else:', "            raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", '    ', '    return args', '', '', 'def run(db: bool, algo: str):', '    ', '    if db:', '        print("Running in Debug Mode. (db=True)")', '    ', '    tf.config.run_functions_eagerly(db)', '    ', '    logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', '    ', '    args = get_config(db=db)', '    args.algo = algo', '    args.log_dir = log_dir', '    ', '    logger.info("---------------------------------------------------------------------------------------------")', '    config: dict = vars(args)', '    config = {key: str(value) for key, value in config.items()}', '    config = OrderedDict(sorted(config.items()))', '    logger.info(json.dumps(config, indent=4))', '    ', '    # Clear tensorflow graph and cache', '    tf.keras.backend.clear_session()', '    tf.compat.v1.reset_default_graph()', '    ', '    # ######################################################################################################## #', '    # ############################################# DATA LOADING ############################################# #', '    # ######################################################################################################## #', '    # Load Buffer to store expert data', '    n_objs = args.num_objs', '    buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', '    ', '    expert_buffer = ReplayBufferTf(', '        buffer_shape, args.buffer_size, args.horizon,', '        sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', '    )', '    offline_buffer = ReplayBufferTf(', '        buffer_shape, args.buffer_size, args.horizon,', '        sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', '    )', '    if n_objs == 3:', "        expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", "        offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", '    elif n_objs == 2:', "        expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", "        offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", '    elif n_objs == 1:', "        expert_data_file = 'single_obj_train.pkl'", "        offline_data_file = 'single_obj_offline.pkl'", '    else:', '        raise NotImplementedError', '    expert_data_path = os.path.join(args.dir_data, expert_data_file)', '    offline_data_path = os.path.join(args.dir_data, offline_data_file)', '    ', '    if not os.path.exists(expert_data_path):', '        logger.error(', '            "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', '        sys.exit(-1)', '    ', '    if not os.path.exists(offline_data_path):', '        logger.error(', '            "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', '        sys.exit(-1)', '    ', '    # Store the expert data in the expert buffer -> D_E', '    logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', "    with open(expert_data_path, 'rb') as handle:", '        buffered_data = pickle.load(handle)', '    ', '    # [Optional] Reformat the G.T. skill sequences', "    curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", "    prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", "    buffered_data['curr_skills'] = curr_skills", "    buffered_data['prev_skills'] = prev_skills", '    # Add a new key "has_gt_skill" indicating that the skill is G.T.', "    buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", '    expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', '    ', '    # Store the offline data in the policy buffer for DemoDICE -> D_O', '    logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', "    with open(offline_data_path, 'rb') as handle:", '        buffered_data = pickle.load(handle)', '    ', '    # [Optional] Reformat the G.T. skill sequences', "    curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", "    prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", "    buffered_data['curr_skills'] = curr_skills", "    buffered_data['prev_skills'] = prev_skills", '    # Add a new key "has_gt_skill" indicating that the skill is G.T.', "    buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", '    offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', '    # ########################################################################################################### #', '    # ############################################# TRAINING #################################################### #', '    # ########################################################################################################### #', '    start = time.time()', '    ', '    agent = Agent(args, expert_buffer, offline_buffer)', '    ', '    logger.info("Training .......")', '    agent.learn()', '', '', 'if __name__ == "__main__":', '    num_runs = 1', '    for i in range(num_runs):', "        run(db=True, algo='BC')"]
[{'reason_category': 'Lambda_Expressions', 'usage_line': 83}]
Library 'tf' used at line 81 is imported at line 20 and has a Long-Range dependency. Library 'tf' used at line 83 is imported at line 20 and has a Long-Range dependency. Variable 'x' used at line 83 is part of a Lambda_Expressions defined at line 83 and has a Short-Range dependency. Variable 'num_objs' used at line 83 is defined at line 74 and has a Short-Range dependency. Variable 'states' used at line 83 is defined at line 81 and has a Short-Range dependency. Variable 'goals' used at line 84 is defined at line 83 and has a Short-Range dependency. Function 'get_goal' used at line 86 is defined at line 81 and has a Short-Range dependency.
{'Lambda_Expressions': 1}
{'Library Long-Range': 2, 'Variable Lambda_Expressions Short-Range': 1, 'Variable Short-Range': 3, 'Function Short-Range': 1}
completion_python
RL_Motion_Planning
103
103
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":']
[' return skill_seq']
['    elif args.wrap_level == "1":', '        # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', '        skill_seq = tf.argmax(skill_seq, axis=-1)', '        skill_seq = skill_seq % 3', '        # Convert back to one-hot', '        skill_seq = tf.one_hot(skill_seq, depth=3)', '        return skill_seq', '    elif args.wrap_level == "2":', '        # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', '        skill_seq = tf.argmax(skill_seq, axis=-1)', '        skill_seq = skill_seq // 3', '        # Convert back to one-hot', '        skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', '        return skill_seq', '    else:', '        raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', '    """Orthogonal regularization v2.', '        See equation (3) in https://arxiv.org/abs/1809.11096.', '        Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', '    Args:', '        model: A keras model to apply regularization for.', "        reg_coef: Orthogonal regularization coefficient. Don't change this value.", '    Returns:', '        A regularization loss term.', '    """', '    reg = 0', '    for layer in model.layers:', '        if isinstance(layer, tf.keras.layers.Dense):', '            prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', '            reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', '    ', '    print("Orthogonal Regularization: {}".format(reg * reg_coef))', '    return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', '    def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', '        """', '        Sample random transitions without HER.', '        Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', '        """', '        ', '        batch_size = batch_size_in_transitions  # Number of transitions to sample', "        T = episodic_data['actions'].shape[1]", "        successes = episodic_data['successes']", '        ', '        # Get index at which episode terminated', '        terminate_idxes = tf.math.argmax(successes, axis=-1)', '        # If no success, set to last index', '        mask_no_success = tf.math.equal(terminate_idxes, 0)', '        terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', '                                       tf.cast(mask_no_success, terminate_idxes.dtype))', '        ', "        # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", '        p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', '        episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', '        episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', '        # Get terminate index for the selected episodes', '        terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', '        print("terminate_idxes: ", terminate_idxes)', '        ', '        # ------------------------------------------------------------------------------------------------------------', '        # --------------------------------- 2) Select which time steps + goals to use --------------------------------', '        # Get the current time step', '        t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', '        t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', '        t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', '        ', '        # Get random init time step (before t_samples)', '        rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', '        t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', '        t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', '        print("t_samples_init: ", t_samples_init)', '        ', '        # Get the future time step', '        rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', '        future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', '        future_offset = tf.cast(future_offset, terminate_idxes.dtype)', '        t_samples_future = t_samples + future_offset', '        print("t_samples_future: ", t_samples_future)', '        ', '        # ------------------------------------------------------------------------------------------------------------', '        # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', '        curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', '        transitions = {}', '        for key in episodic_data.keys():', '            transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', '        ', "        transitions['achieved_goals'] = state_to_goal(", "            states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", '            obj_identifiers=None)', '        ', '        # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', '        future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', "        transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", '                                                 obj_identifiers=None)  # Object ids are not used for unsegmented HER', '        ', '        # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', '        init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', "        transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", '        print("transitions: ", transitions)', '        return transitions', '    ', "    if sample_style == 'random_unsegmented':", '        return sample_random_transitions', '    else:', '        raise NotImplementedError', '', '', 'class ReplayBufferTf:', '    def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', '        """Creates a replay buffer.', '', '        Args:', '            buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', '                buffer', '            size_in_transitions (int): the size of the buffer, measured in transitions', '            T (int): the time horizon for episodes', '            transition_fn (function): a function that samples from the replay buffer', '        """', '        self.T = tf.constant(T, dtype=tf.int32)', '        self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', '        ', '        self.current_size = tf.Variable(0, dtype=tf.int32)  # Size of buffer in terms of no. of episodes', '        self.n_transitions_stored = tf.Variable(0, dtype=tf.int32)  # Size of buffer in terms of no. of transitions', '        ', '        self.transition_fn = transition_fn', '        self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', '        tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', '        self.table = Table(tensor_spec, capacity=self.buffer_size)', '    ', '    @tf.function  # Make sure batch_size passed here is a tf.constant to avoid retracing', '    def sample_transitions(self, batch_size):', '        ', '        buffered_data = {}', '        _data = self.table.read(rows=tf.range(self.current_size))', '        for index, key in enumerate(self.buffer_keys):', '            buffered_data[key] = _data[index]', '        ', '        transitions = self.transition_fn(buffered_data, batch_size)', '        print("transitions: ", transitions)', '        return transitions', '    ', '    @tf.function', '    def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', '        ', '        if ep_start is None or ep_end is None:', '            if num_episodes:', '                num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', '            else:', '                num_episodes = self.current_size', '            ep_range = tf.range(num_episodes)', '        else:', '            ep_range = tf.range(ep_start, ep_end)', '        ', '        buffered_data = {}', '        _data = self.table.read(rows=ep_range)', '        for index, key in enumerate(self.buffer_keys):', '            buffered_data[key] = _data[index]', '        print("buffered_data: ", buffered_data)', '        return buffered_data', '    ', '    @tf.function', '    def store_episode(self, episode_batch):', '        """', '        Store each episode into replay buffer', '        episode_batch: {"": array(1 x (T or T+1) x dim)}', '        """', '        idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', '        values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', '        self.table.write(rows=idxs, values=values)', '        self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', '    ', '    def store_episodes(self, episodes_batch):', "        for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", '            episode_batch = {}', '            for key in self.buffer_keys:', '                episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', '            self.store_episode(episode_batch)', '    ', '    def _get_storage_idxs(self, num_to_ins=None):', '        if num_to_ins is None:', '            num_to_ins = tf.cast(1, dtype=tf.int32)', '        ', '        # consecutively insert until you hit the end of the buffer, and then insert randomly.', '        if self.current_size + num_to_ins <= self.buffer_size:', '            idxs = tf.range(self.current_size, self.current_size + num_to_ins)', '        elif self.current_size < self.buffer_size:', '            overflow = num_to_ins - (self.buffer_size - self.current_size)', '            idx_a = tf.range(self.current_size, self.buffer_size)', '            idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', '            idxs = tf.concat([idx_a, idx_b], axis=0)', '        else:', '            idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', '        ', '        # update buffer size', '        self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', '        print("idxs: ", idxs)', '        return idxs', '    ', '    def get_current_size_ep(self):', '        return self.current_size', '    ', '    def get_current_size_trans(self):', '        return self.current_size * self.T', '    ', '    def clear_buffer(self):', '        self.current_size.assign(0)', '    ', '    @property', '    def full(self):', '        return self.current_size == self.buffer_size', '    ', '    def __len__(self):', '        return self.current_size', '    ', '    def save_buffer_data(self, path):', '        buffered_data = {}', '        _data = self.table.read(rows=tf.range(self.current_size))', '        for index, key in enumerate(self.buffer_keys):', '            buffered_data[key] = _data[index]', '        ', "        with open(path, 'wb') as handle:", '            pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', '    ', '    def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', '        ', '        if buffered_data is None:', '            raise ValueError("No buffered_data provided")', '        ', '        if clear_buffer:', '            self.clear_buffer()', '        ', '        if num_demos_to_load is not None:', '            ', '            # Randomly sample idxs to load', "            idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", '            ', '            for key in buffered_data.keys():', '                buffered_data[key] = tf.gather(buffered_data[key], idxs)', '        ', '        # Check if all tensors are present in loaded data', '        data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', '        assert np.all(np.array(data_sizes) == data_sizes[0])', '        ', '        idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', '        values = [buffered_data[key] for key in self.buffer_keys]', '        ', '        self.table.write(rows=idxs, values=values)', '        self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', '    def __init__(self, action_dim):', '        super(Actor, self).__init__()', '        ', '        # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', '        self.base = tf.keras.Sequential([', "            Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", "            Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", "            Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", "            Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", '        ])', '        ', '        self.MEAN_MIN, self.MEAN_MAX = -7, 7', '        self.eps = np.finfo(np.float32).eps', '        self.pi = tf.constant(np.pi)', '        self.FIXED_STD = 0.05', '        ', '        self.train = True', '    ', '    def get_log_prob(self, states, actions):', '        """Evaluate log probs for actions conditioned on states.', '        Args:', '            states: A batch of states.', '            actions: A batch of actions to evaluate log probs on.', '        Returns:', '            Log probabilities of actions.', '        """', '        mu = self.base(states)', '        mu = tf.nn.tanh(mu)', '        mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', '        ', '        std = tf.ones_like(mu) * self.FIXED_STD', '        ', '        actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', '        ', '        # Get log probs from Gaussian distribution', '        log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', '        log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', '        print("log_probs: ", log_probs)', '        return log_probs', '    ', '    def call(self, states, training=None, mask=None):', '        """Computes actions for given inputs.', '        Args:', '            states: A batch of states.', '            training: Ignored', '            mask: Ignored.', '        Returns:', '            A mode action, a sampled action and log probability of the sampled action.', '        """', '        mu = self.base(states)', '        mu = tf.nn.tanh(mu)', '        mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', '        ', '        if self.train:', '            # Sample actions from the distribution', '            actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', '        else:', '            actions = mu', '        ', '        # Compute log probs', '        log_probs = self.get_log_prob(states, actions)', '        log_probs = tf.expand_dims(log_probs, -1)  # To avoid broadcasting', '        ', '        actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', '        print("mu: ", mu)', '        print("actions: ", actions)', '        print("log_probs: ", log_probs)', '        return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', '    def __init__(self, args: Namespace):', '        super(BC, self).__init__()', '        self.args = args', '        ', '        # Declare Policy Network and Optimiser', '        self.actor = Actor(args.a_dim)', '        self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', '        ', '        # Build Model', '        self.build_model()', '        ', '        # For HER', '        self.use_her = False', "        logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", '    ', '    @tf.function(experimental_relax_shapes=True)', '    def train(self, data_exp, data_rb):', '        with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', '            tape.watch(self.actor.variables)', '            ', "            actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", "            pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", '            pi_loss = tf.reduce_mean(pi_loss)', '            penalty = orthogonal_regularization(self.actor.base)', '            pi_loss_w_penalty = pi_loss + penalty', '        ', '        grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', '        self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', '        print("loss/pi: ", pi_loss)', '        print("penalty/pi_ortho_penalty: ", penalty)', '        return {', "            'loss/pi': pi_loss,", "            'penalty/pi_ortho_penalty': penalty,", '        }', '    ', '    def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', '        state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', '        env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', '        prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', '        ', '        # ###################################### Current Goal ####################################### #', '        curr_goal = env_goal', '        ', '        # ###################################### Current Skill ###################################### #', '        curr_skill = prev_skill  # Not used in this implementation', '        ', '        # ########################################## Action ######################################### #', '        # Explore', '        if tf.random.uniform(()) < epsilon:', '            action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', '        # Exploit', '        else:', '            action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1))  # a_t = mu(s_t, g_t)', '            action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', '            action = action_mu + action_dev  # Add noise to action', '            action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', '        ', '        # Safety check for action, should not be nan or inf', '        has_nan = tf.math.reduce_any(tf.math.is_nan(action))', '        has_inf = tf.math.reduce_any(tf.math.is_inf(action))', '        if has_nan or has_inf:', "            logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", '            action = tf.zeros_like(action)', '        ', '        return curr_goal, curr_skill, action', '    ', '    def get_init_skill(self):', '        """', '        demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', '        """', '        skill = tf.zeros((1, self.args.c_dim))', '        return skill', '    ', '    @staticmethod', '    def get_init_goal(init_state, g_env):', '        return g_env', '    ', '    def build_model(self):', '        # a_t <- f(s_t) for each skill', '        _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', '    ', '    def save_(self, dir_param):', '        self.actor.save_weights(dir_param + "/policy.h5")', '    ', '    def load_(self, dir_param):', '        self.actor.load_weights(dir_param + "/policy.h5")', '    ', '    def change_training_mode(self, training_mode: bool):', '        pass', '    ', '    def update_target_networks(self):', '        pass', '', '', 'class AgentBase(object):', '    def __init__(', '            self,', '            args,', '            model,', '            algo: str,', '            expert_buffer: ReplayBufferTf,', '            offline_buffer: ReplayBufferTf', '    ):', '        ', '        self.args = args', '        self.model = model', '        ', '        # Define the Buffers', '        self.expert_buffer = expert_buffer', '        self.offline_buffer = offline_buffer', '        ', '        self.offline_gt_prev_skill = None', '        self.offline_gt_curr_skill = None', '        ', '        # Define Tensorboard for logging Losses and Other Metrics', '        if not os.path.exists(args.dir_summary):', '            os.makedirs(args.dir_summary)', '        ', '        if not os.path.exists(args.dir_plot):', '            os.makedirs(args.dir_plot)', '        self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', '        ', '        # Define wandb logging', '        if self.args.log_wandb:', '            self.wandb_logger = wandb.init(', '                project=args.wandb_project,', '                config=vars(args),', "                id='{}_{}'.format(algo, current_time),", '                reinit=True,  # Allow multiple wandb.init() calls in the same process.', '            )', '            # Clear tensorflow graph and cache', '            tf.keras.backend.clear_session()', '            tf.compat.v1.reset_default_graph()', '    ', '    def preprocess_in_state_space(self, item):', '        item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', '        return item', '    ', '    def save_model(self, dir_param):', '        if not os.path.exists(dir_param):', '            os.makedirs(dir_param)', '        self.model.save_(dir_param)', '    ', '    def load_model(self, dir_param):', '        self.model.load_(dir_param)', '    ', '    def process_data(self, transitions, expert=False, is_supervised=False):', '        ', '        trans = transitions.copy()', '        ', '        # Process the states and goals', "        trans['states'] = self.preprocess_in_state_space(trans['states'])", "        trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", "        trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", "        trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", "        trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", "        trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", '        ', '        if self.model.use_her:', "            trans['goals'] = trans['her_goals']", '        else:', "            trans['goals'] = trans['env_goals']", '        ', '        # Define if the transitions are from expert or not/are supervised or not', "        trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", "        trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", '        ', '        # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', "        trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", "                                                         tf.argmax(trans['curr_skills'], axis=-1)),", '                                            dtype=tf.int32)', '        # reshape the terminate_skills to be of shape (batch_size, 1)', "        trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", '        ', '        # Make sure the data is of type tf.float32', '        for key in trans.keys():', '            trans[key] = tf.cast(trans[key], dtype=tf.float32)', '        print("trans :", trans)', '        return trans', '    ', '    def sample_data(self, buffer, batch_size):', '        ', '        # Sample Transitions', '        transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', '        ', '        # Process the transitions', '        keys = None', '        if all(isinstance(v, dict) for v in transitions.values()):', '            for skill in transitions.keys():', '                ', '                # For skills whose transition data is not None', '                if transitions[skill] is not None:', '                    transitions[skill] = self.process_data(', '                        transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', '                    )', '                    ', '                    keys = transitions[skill].keys()', '            ', '            # If keys is None, No transitions were sampled', '            if keys is None:', '                raise ValueError("No transitions were sampled")', '            ', '            # Concatenate the transitions from different skills', '            combined_transitions = {key: [] for key in keys}', '            ', '            for skill in transitions.keys():', '                ', '                if transitions[skill] is not None:', '                    for key in keys:', '                        combined_transitions[key].append(transitions[skill][key])', '            ', '            for key in keys:', '                combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', '            ', '            transitions = combined_transitions', '        ', '        elif isinstance(transitions, dict):', '            transitions = self.process_data(', '                transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', '            )', '        ', '        else:', '            raise ValueError("Invalid type of transitions")', '        print("transitions: ", transitions)', '        return transitions', '    ', '    @tf.function', '    def train(self):', '        ', '        self.model.change_training_mode(training_mode=True)', '        ', '        data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', '        data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', '        loss_dict = self.model.train(data_expert, data_policy)', '        ', '        # Average the losses', '        avg_loss_dict = {}', '        for key in loss_dict.keys():', '            if key not in avg_loss_dict.keys():', '                avg_loss_dict[key] = []', '            avg_loss_dict[key].append(loss_dict[key])', '        for key in avg_loss_dict.keys():', '            avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', '        print("avg_loss_dict: ", avg_loss_dict)', '        return avg_loss_dict', '    ', '    def learn(self):', '        # This is a base class method, inherited classes must implement this method', '        raise NotImplementedError', '', '', 'class Agent(AgentBase):', '    def __init__(self, args,', '                 expert_buffer: ReplayBufferTf = None,', '                 offline_buffer: ReplayBufferTf = None):', '        ', "        super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", '    ', '    def load_actor(self, dir_param):', '        self.model.actor.load_weights(dir_param + "/policy.h5")', '    ', '    def learn(self):', '        args = self.args', '        ', '        # Tracker for wandb logging', '        log_step = 0', '        ', '        # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', '        data_exp = self.expert_buffer.sample_episodes()', '        data_off = self.offline_buffer.sample_episodes()', '        self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', '        self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', '        self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', '        ', '        with tqdm(total=args.max_time_steps, leave=False) as pbar:', '            for curr_t in range(0, args.max_time_steps):', '                ', '                # Update the reference actors and directors using polyak averaging', '                if curr_t % args.update_target_interval == 0:', '                    tf.print("Updating the target actors and critics at train step {}".format(curr_t))', '                    self.model.update_target_networks()', '                ', '                # Train the policy', "                pbar.set_description('Training')", '                avg_loss_dict = self.train()', '                for key in avg_loss_dict.keys():', '                    avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', '                ', '                # Log', '                if self.args.log_wandb:', '                    self.wandb_logger.log(avg_loss_dict, step=log_step)', '                    self.wandb_logger.log({', "                        'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", "                        'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", '                    }, step=log_step)', '                ', '                # Update', '                pbar.update(1)', '                log_step += 1', '        ', '        # Save the model', '        self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', '    """', '    :param args: Namespace object', '    :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', '    """', '    ', '    args.g_dim = 3', '    args.s_dim = 10', '    args.a_dim = 4', '    ', "    # Specify the expert's latent skill dimension [Default]", "    # Define number of skills, this could be different from agent's practiced skill dimension", "    assert hasattr(args, 'num_objs')", '    args.c_dim = 3 * args.num_objs', '    ', '    if ag_in_env_goal:', '        args.ag_dim = args.g_dim  # Achieved Goal in the same space as Env Goal', '    else:', '        args.ag_dim = 3  # Goal/Object position in the 3D space', '    print("args: ", args)', '    return args', '', '', 'def get_config(db=False):', '    # Construct the absolute path of the data directory', "    data_dir = os.path.join(script_dir, 'pnp_data')", '', '    parser = argparse.ArgumentParser()', '    ', "    parser.add_argument('--log_wandb', type=bool, default=False)", "    parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", "                        choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", '    ', "    parser.add_argument('--expert_demos', type=int, default=25)", "    parser.add_argument('--offline_demos', type=int, default=75)", "    parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", "                        help='Use 10 (num of demos to evaluate trained pol)')", "    parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", "    parser.add_argument('--perc_train', type=int, default=1.0)", '    ', '    # Specify Environment Configuration', "    parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", "    parser.add_argument('--num_objs', type=int, default=1)", "    parser.add_argument('--horizon', type=int, default=100,", "                        help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", "    parser.add_argument('--stacking', type=bool, default=False)", "    parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", "                        help='Expert behaviour in two_object env')", "    parser.add_argument('--full_space_as_goal', type=bool, default=False)", "    parser.add_argument('--fix_goal', type=bool, default=False,", "                        help='[Debugging] Fix the goal position for one object task')", "    parser.add_argument('--fix_object', type=bool, default=False,", "                        
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 103}]
Variable 'skill_seq' used at line 103 is defined at line 89 and has a Medium-Range dependency.
{'If Body': 1}
{'Variable Medium-Range': 1}
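Note on the two frequency fields that close each record: they look like plain counts over the category annotations. A minimal sketch, assuming the `*_freq_analysis` fields are derived with a simple counter (the dataset's actual generation code is not shown in this dump):

```python
# Hedged sketch: reproduces the freq-analysis dicts above by counting
# category labels; the dataset's real derivation code is not shown here.
from collections import Counter

reason_categories = [{'reason_category': 'If Body', 'usage_line': 103}]
reason_freq = dict(Counter(d['reason_category'] for d in reason_categories))
print(reason_freq)  # {'If Body': 1}
```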
completion_python
RL_Motion_Planning
106
110
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3']
[' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq']
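The `between` completion above collapses a `c_dim`-way one-hot skill id `j` to `j % 3` (pick/grab/drop with the object id dropped). A minimal runnable sketch of that re-mapping on a toy 9-skill sequence; the shapes and values here are illustrative, not taken from the dataset:

```python
# Hedged sketch of the wrap_level "1" re-mapping: one-hot id j -> j % 3.
import tensorflow as tf

skill_seq = tf.one_hot([[4, 7, 2]], depth=9)  # toy (1, 3, 9) one-hot skills
ids = tf.argmax(skill_seq, axis=-1) % 3       # [[4, 7, 2]] -> [[1, 1, 2]]
wrapped = tf.one_hot(ids, depth=3)            # back to one-hot, dim 3
print(tf.argmax(wrapped, axis=-1).numpy())    # [[1 1 2]]
```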
[' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # --------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * 
tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
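For contrast with the `between` completion, the record's `after` field opens with the wrap_level "2" branch, which maps id `j` to `j // 3` (keeping only the object id). A minimal sketch under the same toy assumptions as above:

```python
# Hedged sketch of wrap_level "2": one-hot id j -> j // 3 (object id only).
import tensorflow as tf

skill_seq = tf.one_hot([[4, 7, 2]], depth=9)        # toy 9-skill sequence
ids = tf.argmax(skill_seq, axis=-1) // 3            # [[4, 7, 2]] -> [[1, 2, 0]]
print(tf.one_hot(ids, depth=3).numpy().argmax(-1))  # [[1 2 0]]
```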
[{'reason_category': 'Elif Body', 'usage_line': 106}, {'reason_category': 'Elif Body', 'usage_line': 107}, {'reason_category': 'Elif Body', 'usage_line': 108}, {'reason_category': 'Elif Body', 'usage_line': 109}, {'reason_category': 'Elif Body', 'usage_line': 110}]
Library 'tf' used at line 106 is imported at line 20 and has a Long-Range dependency. Variable 'skill_seq' used at line 106 is defined at line 89 and has a Medium-Range dependency. Variable 'skill_seq' used at line 107 is defined at line 106 and has a Short-Range dependency. Variable 'skill_seq' used at line 109 is defined at line 107 and has a Short-Range dependency. Library 'tf' used at line 109 is imported at line 20 and has a Long-Range dependency. Variable 'skill_seq' used at line 110 is defined at line 109 and has a Short-Range dependency.
{'Elif Body': 5}
{'Library Long-Range': 2, 'Variable Medium-Range': 1, 'Variable Short-Range': 3}
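The two dictionaries above are tallies of the per-usage annotations that precede them. A small sketch of a hypothetical helper (not part of the dataset tooling) that recovers the horizon frequency dictionary from the dependency sentences, including the 'Variable Loop' prefix used when a variable is bound by a loop:

import re
from collections import Counter

def horizon_freq(text: str) -> dict:
    # One sentence per dependency; loop-carried variables get a 'Loop' tag.
    freq = Counter()
    for s in filter(None, (s.strip() for s in text.split("."))):
        kind = "Library" if s.startswith("Library") else "Variable"
        if "part of a Loop" in s:
            kind += " Loop"
        m = re.search(r"(Short|Medium|Long)-Range", s)
        if m:
            freq[f"{kind} {m.group(1)}-Range"] += 1
    return dict(freq)

# Applied to the record above this yields the same tally as its field:
# {'Library Long-Range': 2, 'Variable Medium-Range': 1, 'Variable Short-Range': 3}
print(horizon_freq("Variable 'x' used at line 5 is defined at line 4 "
                   "and has a Short-Range dependency."))  # {'Variable Short-Range': 1}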
completion_python
RL_Motion_Planning
135
136
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):']
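The after-context of this record defines sample_random_transitions, which draws episodes with probability proportional to (termination index + 1), so longer, typically unsuccessful episodes are sampled more often. A minimal NumPy sketch of that weighting, with np.random.choice standing in for the script's tfp.distributions.Categorical draw:

import numpy as np

terminate_idxes = np.array([10, 99, 40])               # per-episode termination steps
p = (terminate_idxes + 1) / np.sum(terminate_idxes + 1)
print(p.round(3))                                       # [0.072 0.658 0.27 ]
episode_idxs = np.random.choice(len(p), size=5, p=p)    # episode 1 dominates the draw
print(episode_idxs)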
[' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))']
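These two completion lines implement the docstring's penalty R_beta(W) = beta * ||W^T W ⊙ (1 - I)||_F^2, summed over Dense kernels. A minimal NumPy sketch of the same computation (assumed equivalent to the TensorFlow version): with orthonormal columns the Gram matrix is the identity, so the off-diagonal penalty vanishes:

import numpy as np

def ortho_penalty(W: np.ndarray, beta: float = 1e-4) -> float:
    prod = W.T @ W                                   # Gram matrix of kernel columns
    off_diag = prod * (1.0 - np.eye(prod.shape[0]))  # zero out the diagonal
    return beta * np.sum(off_diag ** 2)              # squared Frobenius norm

W = np.linalg.qr(np.random.randn(8, 4))[0]  # 8x4 matrix with orthonormal columns
print(ortho_penalty(W))                     # ~0.0: orthogonal weights go unpenalised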
[' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # --------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = 
state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], 
ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, 
actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
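Actor.get_log_prob in the listing above evaluates a fixed-std diagonal Gaussian term by term: log N(a; mu, sigma) = -0.5*((a - mu)/sigma)^2 - 0.5*log(2*pi) - log(sigma). A quick check of that identity against scipy.stats.norm (scipy is assumed available here; it is not imported by the script itself):

import numpy as np
from scipy.stats import norm

mu, std, a = 0.3, 0.05, 0.32  # FIXED_STD = 0.05 in the Actor above
manual = -0.5 * ((a - mu) / std) ** 2 - 0.5 * np.log(2 * np.pi) - np.log(std)
print(np.isclose(manual, norm.logpdf(a, loc=mu, scale=std)))  # True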
[{'reason_category': 'Loop Body', 'usage_line': 135}, {'reason_category': 'If Body', 'usage_line': 135}, {'reason_category': 'Loop Body', 'usage_line': 136}, {'reason_category': 'If Body', 'usage_line': 136}]
Library 'tf' used at line 135 is imported at line 20 and has a Long-Range dependency. Variable 'layer' used at line 135 is part of a Loop defined at line 133 and has a Short-Range dependency. Variable 'reg' used at line 136 is defined at line 132 and has a Short-Range dependency. Library 'tf' used at line 136 is imported at line 20 and has a Long-Range dependency. Variable 'prod' used at line 136 is defined at line 135 and has a Short-Range dependency.
{'Loop Body': 2, 'If Body': 2}
{'Library Long-Range': 2, 'Variable Loop Short-Range': 1, 'Variable Short-Range': 2}
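The records never state how the Short/Medium/Long-Range labels are assigned; from the examples they appear to track the def-use line distance. A hedged sketch, assuming illustrative thresholds of 5 and 30 lines (guesses, not taken from any annotation spec), which happens to reproduce every labelled pair in this section:

def horizon_label(use_line: int, def_line: int) -> str:
    # Thresholds below are assumptions for illustration only.
    dist = use_line - def_line
    if dist <= 5:
        return "Short-Range"
    if dist <= 30:
        return "Medium-Range"
    return "Long-Range"

print(horizon_label(106, 20))   # Long-Range: 'tf' imported 86 lines earlier
print(horizon_label(106, 89))   # Medium-Range
print(horizon_label(136, 132))  # Short-Range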
completion_python
RL_Motion_Planning
139
139
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))']
[' return reg * reg_coef']
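The after-context that follows includes ReplayBufferTf._get_storage_idxs, which picks slots for incoming episodes: consecutive indices while the buffer has room, then random overwrites once it is full. A minimal NumPy sketch of the same policy (a stand-in, not the script's TensorFlow version):

import numpy as np

def storage_idxs(current_size: int, buffer_size: int, num_to_ins: int) -> np.ndarray:
    if current_size + num_to_ins <= buffer_size:
        # Enough room: insert consecutively.
        return np.arange(current_size, current_size + num_to_ins)
    if current_size < buffer_size:
        # Partially full: fill the tail, then overwrite random earlier slots.
        overflow = num_to_ins - (buffer_size - current_size)
        idx_a = np.arange(current_size, buffer_size)
        idx_b = np.random.randint(0, current_size, size=overflow)
        return np.concatenate([idx_a, idx_b])
    # Full: overwrite random slots anywhere.
    return np.random.randint(0, buffer_size, size=num_to_ins)

print(storage_idxs(3, 8, 4))  # [3 4 5 6]: still room, consecutive insert
print(storage_idxs(6, 8, 4))  # e.g. [6 7 3 1]: 2 consecutive + 2 random overwrites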
['', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # --------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for 
unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = 
tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to 
evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'reg' used at line 139 is defined at line 132 and has a Short-Range dependency. Variable 'reg_coef' used at line 139 is defined at line 122 and has a Medium-Range dependency.
{}
{'Variable Short-Range': 1, 'Variable Medium-Range': 1}
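The annotation above buckets each def-use pair by line distance ('reg': 132 -> 139 is Short-Range; 'reg_coef': 122 -> 139 is Medium-Range), and the frequency dict tallies those buckets. A minimal sketch of how such a classification and tally could be computed — the cut-off thresholds (<=10 Short, <=32 Medium, else Long) are assumptions, since the dataset's actual labelling rules are not stated here:

from collections import Counter
from typing import Dict, List, Tuple

def classify_range(def_line: int, use_line: int) -> str:
    # Bucket a def-use pair by the number of lines between definition and use
    distance = use_line - def_line
    if distance <= 10:
        return 'Variable Short-Range'
    elif distance <= 32:
        return 'Variable Medium-Range'
    return 'Variable Long-Range'

def freq_analysis(dependencies: List[Tuple[int, int]]) -> Dict[str, int]:
    # dependencies: (def_line, use_line) pairs extracted from the code
    return dict(Counter(classify_range(d, u) for d, u in dependencies))

# 'reg' (132 -> 139, distance 7) and 'reg_coef' (122 -> 139, distance 17)
print(freq_analysis([(132, 139), (122, 139)]))
# -> {'Variable Short-Range': 1, 'Variable Medium-Range': 1}

Under these assumed thresholds the sketch reproduces the frequency dict shown above.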
completion_python
RL_Motion_Planning
193
193
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():']
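The orthogonal_regularization docstring in the context above carries its equation with flattened sub/superscripts ("Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F"). Restated in clean LaTeX, with $W$ the kernel of a Dense layer, $\beta$ the reg_coef, and the loss summed over every Dense layer exactly as the function does:

$$R_\beta(W) = \beta \,\bigl\lVert W^\top W \odot (1 - I) \bigr\rVert_F^2$$

where $\odot$ is the Hadamard product and $\lVert \cdot \rVert_F$ the Frobenius norm; masking with $(1 - I)$ penalises only the off-diagonal entries of $W^\top W$, i.e. it pushes distinct columns of $W$ toward orthogonality without constraining their norms.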
[' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)']
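The single target line above gathers one transition per sampled (episode, timestep) pair out of the episodic batch. A self-contained illustration of that tf.gather_nd indexing pattern — the toy shapes and values are assumptions for demonstration, not taken from the dataset:

import tensorflow as tf

# Toy episodic batch: 2 episodes, horizon 3, action dim 2 (assumed shapes)
episodic_data = {'actions': tf.reshape(tf.range(12, dtype=tf.float32), (2, 3, 2))}

episode_idxs = tf.constant([0, 1], dtype=tf.int64)  # which episode per sample
t_samples = tf.constant([2, 0], dtype=tf.int64)     # which timestep per sample
curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)  # shape (2, 2)

transitions = {}
for key in episodic_data.keys():
    # Picks episodic_data[key][episode, t] for each (episode, t) index row
    transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)

print(transitions['actions'])  # rows: actions[0, 2] and actions[1, 0]

Each row of curr_indices addresses the leading two axes of the (episodes, horizon, dim) tensor, so the gather returns a (batch, dim) tensor of transitions, which is what the surrounding sampling code builds for every key in the episodic data.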
[' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 193}]
Variable 'transitions' used at line 193 is defined at line 191 and has a Short-Range dependency. Variable 'key' used at line 193 is part of a Loop defined at line 192 and has a Short-Range dependency. Library 'tf' used at line 193 is imported at line 20 and has a Long-Range dependency. Variable 'episodic_data' used at line 193 is defined at line 143 and has a Long-Range dependency. Variable 'curr_indices' used at line 193 is defined at line 190 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 2, 'Variable Loop Short-Range': 1, 'Library Long-Range': 1, 'Variable Long-Range': 1}
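The record above labels every dependency in the dict-building loop of sample_random_transitions (usage line 193 of the source file): 'transitions' and 'curr_indices' defined a couple of lines earlier (Variable Short-Range), the loop variable 'key' (Variable Loop Short-Range), and 'tf' / 'episodic_data' defined far above (Library/Variable Long-Range). A minimal, self-contained sketch of that exact pattern, using hypothetical toy shapes and names, shows what the analysed line computes; it is illustrative only and not part of the dataset.

import tensorflow as tf

# Toy episodic data (hypothetical shapes): 4 episodes, horizon 5.
episodic_data = {
    'states': tf.random.normal((4, 5, 3)),
    'actions': tf.random.normal((4, 5, 2)),
}

episode_idxs = tf.constant([0, 2, 3], dtype=tf.int64)  # sampled episode ids
t_samples = tf.constant([1, 4, 0], dtype=tf.int64)     # sampled time steps
curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)  # shape (3, 2)

transitions = {}
for key in episodic_data.keys():
    # The analysed line: one (episode, t) row gathered per sampled index pair.
    transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)

print(transitions['states'].shape)   # (3, 3)
print(transitions['actions'].shape)  # (3, 2)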
completion_python
RL_Motion_Planning
211
211
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':"]
[' return sample_random_transitions']
[' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = 
tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * 
self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = 
tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * 
tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert 
data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', 
type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. 
ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 211}]
Function 'sample_random_transitions' used at line 211 is defined at line 143 and has a Long-Range dependency.
{'If Body': 1}
{'Function Long-Range': 1}
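This record's single label, a Function Long-Range dependency inside an If Body, reflects the closure-factory shape of sample_transitions in the source: the inner sampler is defined at line 143 but only returned at line 211, inside the if sample_style == 'random_unsegmented': branch. A hedged sketch of that pattern follows, with simplified stand-in names rather than the dataset's actual code.

def make_sampler(sample_style: str):
    # Inner function defined early (the sketch's analogue of line 143) ...
    def sample_random_transitions(episodic_data, batch_size_in_transitions=None):
        # ... sampling logic elided in this sketch ...
        return {}

    # ... and referenced much later (the analogue of line 211): a Long-Range
    # use of the inner definition, sitting inside an If Body.
    if sample_style == 'random_unsegmented':
        return sample_random_transitions
    else:
        raise NotImplementedError

sampler = make_sampler('random_unsegmented')
print(sampler({}, batch_size_in_transitions=4))  # -> {}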
completion_python
RL_Motion_Planning
244
244
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):']
[' buffered_data[key] = _data[index]']
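For this record the completion (the 'between' field above) closes the read-back loop in ReplayBufferTf.sample_transitions. A minimal plain-Python sketch, with hypothetical keys, of the alignment the loop relies on: the Table's tensor_spec was built by iterating self.buffer_keys, so the list returned by self.table.read(...) lines up index-for-index with those keys.

buffer_keys = ['states', 'actions', 'successes']                  # assumed keys, for illustration only
_data = ['states_tensor', 'actions_tensor', 'successes_tensor']   # stand-in for self.table.read(...)
buffered_data = {}
for index, key in enumerate(buffer_keys):
    buffered_data[key] = _data[index]
assert buffered_data == dict(zip(buffer_keys, _data))             # equivalent one-liner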
[' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = 
np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her 
= False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
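The context above defines orthogonal_regularization with the docstring formula Rβ(W) = β∥WᵀW ⊙ (1 − I)∥²_F. A numpy hand-check on a toy 2x2 kernel (values chosen for easy arithmetic, not taken from the source) confirming that the loop penalises exactly the off-diagonal Gram entries:

import numpy as np

beta = 1e-4                                   # reg_coef default from the source
W = np.array([[1.0, 1.0],
              [0.0, 1.0]])                    # toy kernel
gram = W.T @ W                                # [[1., 1.], [1., 2.]]
off_diag = gram * (1.0 - np.eye(2))           # zero the diagonal, as prod * (1 - tf.eye(...))
reg = beta * np.sum(off_diag ** 2)            # beta * (1^2 + 1^2) = 2e-4
assert np.isclose(reg, 2e-4)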
[{'reason_category': 'Loop Body', 'usage_line': 244}]
Variable 'buffered_data' used at line 244 is defined at line 241 and has a Short-Range dependency. Variable 'key' used at line 244 is part of a Loop defined at line 243 and has a Loop Short-Range dependency. Variable '_data' used at line 244 is defined at line 242 and has a Short-Range dependency. Variable 'index' used at line 244 is part of a Loop defined at line 243 and has a Loop Short-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 2, 'Variable Loop Short-Range': 2}
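A small runnable sketch that recomputes the two frequency fields above from this record's per-usage annotations; the tuples simply mirror the dependency sentences (variable name, use line, definition line, whether the definition is a loop):

deps = [
    ('buffered_data', 244, 241, False),
    ('key',           244, 243, True),
    ('_data',         244, 242, False),
    ('index',         244, 243, True),
]
freq = {}
for name, use_line, def_line, in_loop in deps:
    label = 'Variable Loop Short-Range' if in_loop else 'Variable Short-Range'
    freq[label] = freq.get(label, 0) + 1
print(freq)   # {'Variable Short-Range': 2, 'Variable Loop Short-Range': 2}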
completion_python
RL_Motion_Planning
246
248
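Both records' context also includes Actor.get_log_prob, which hand-codes the diagonal Gaussian log-density rather than calling into tfp. A self-contained numpy check, with toy values not taken from the source, that the expression matches log N(a; mu, sigma):

import numpy as np

mu, std, a = 0.3, 0.05, 0.32                  # toy action statistics
log_prob = -0.5 * ((a - mu) / std) ** 2 - 0.5 * np.log(2 * np.pi) - np.log(std)
pdf = np.exp(-0.5 * ((a - mu) / std) ** 2) / (std * np.sqrt(2 * np.pi))
assert np.isclose(log_prob, np.log(pdf))      # same density, computed two ways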
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ']
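The decorator comment on sample_transitions in the context above asks for batch_size to be a tf.constant to avoid retracing. A short TF 2.x sketch (hypothetical function f) of the behaviour it alludes to: each distinct Python int bakes a new trace, while tensors of one dtype/shape share a single trace.

import tensorflow as tf

@tf.function
def f(batch_size):
    return tf.range(batch_size)

f(32); f(64)
print(f.experimental_get_tracing_count())     # 2: one trace per Python int value
f(tf.constant(128)); f(tf.constant(256))
print(f.experimental_get_tracing_count())     # 3: the two tensor calls share one trace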
[' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions']
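This record's completion forwards buffered_data to self.transition_fn, i.e. sample_random_transitions from the context. A numpy sketch of the episode weighting that function applies (toy horizon and termination indices, not from the source): episodes that never succeed keep terminate_idx = T - 1 and therefore receive the largest sampling weight p_i = (t_i + 1) / sum_j (t_j + 1).

import numpy as np

T = 100
terminate_idxes = np.array([10, 40, T - 1])   # two successful episodes, one failure
p = (terminate_idxes + 1) / np.sum(terminate_idxes + 1)
print(p)                                      # approx [0.072, 0.270, 0.658]
episode_idxs = np.random.choice(len(p), size=8, p=p)   # the failed episode dominates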
[' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = 
tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 246 is defined at line 239 and has a Short-Range dependency. Variable 'buffered_data' used at line 246 is defined at line 241 and has a Short-Range dependency. Variable 'batch_size' used at line 246 is defined at line 239 and has a Short-Range dependency. Variable 'transitions' used at line 247 is defined at line 246 and has a Short-Range dependency. Variable 'transitions' used at line 248 is defined at line 246 and has a Short-Range dependency.
{}
{'Variable Short-Range': 5}
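The dependency listing two fields above pairs each use site with its definition site and a range label, and the dictionary on the previous line tallies those labels. As a hedged illustration (not part of the dataset), one plausible way such labels could be derived is to bucket the def-use line distance; the thresholds below (Short within 10 lines, Medium within 30) are assumptions made only for this sketch, since the records themselves never state the cut-offs.

def classify_dependency(usage_line: int, def_line: int,
                        short_max: int = 10, medium_max: int = 30) -> str:
    # Bucket a def-use line distance into a Short/Medium/Long-Range label.
    # Threshold values are illustrative assumptions, not the dataset's.
    distance = usage_line - def_line
    if distance <= short_max:
        return 'Short-Range'
    if distance <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

# Mirrors the first entry in the listing above:
# 'self' used at line 246, defined at line 239.
assert classify_dependency(246, 239) == 'Short-Range'
# And the next record's library import:
# 'tf' used at line 255, imported at line 20.
assert classify_dependency(255, 20) == 'Long-Range'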
completion_python
RL_Motion_Planning
255
255
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:']
[' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)']
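The single ground-truth line above caps the requested episode count at the buffer's current size, casting the Python int to the variable's dtype so that tf.math.minimum receives compatible operands. A minimal standalone sketch of the same pattern follows; the concrete values are invented for illustration only.

import tensorflow as tf

current_size = tf.Variable(8, dtype=tf.int32)  # episodes stored so far
num_episodes = 20                              # caller requests more than exist
num_episodes = tf.math.minimum(
    tf.cast(num_episodes, dtype=current_size.dtype), current_size)
print(int(num_episodes))  # 8: the read never extends past the filled rows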
[' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = 
[buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 255}]
Library 'tf' used at line 255 is imported at line 20 and has a Long-Range dependency. Variable 'self' used at line 255 is defined at line 251 and has a Short-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
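The two frequency dictionaries above are direct tallies of the category strings produced for this record: one 'If Body' reason, plus one Library Long-Range and one Variable Short-Range dependency. As a hedged sketch (the list-of-tuples format here is illustrative, not the dataset's schema), the same counts can be reproduced with collections.Counter:

from collections import Counter

# The two dependencies listed for this record:
dependencies = [
    ('Library', 'Long-Range'),    # 'tf' used at line 255, imported at line 20
    ('Variable', 'Short-Range'),  # 'self' used at line 255, defined at line 251
]
horizon_freq = Counter('{} {}'.format(kind, rng) for kind, rng in dependencies)
print(dict(horizon_freq))  # {'Library Long-Range': 1, 'Variable Short-Range': 1}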
completion_python
RL_Motion_Planning
257
257
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:']
[' num_episodes = self.current_size']
[' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in 
self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 257}, {'reason_category': 'Else Reasoning', 'usage_line': 257}]
Variable 'self' used at line 257 is defined at line 251 and has a Short-Range dependency.
{'If Body': 1, 'Else Reasoning': 1}
{'Variable Short-Range': 1}
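Each record closes with two frequency fields that are plain counts over the per-usage annotation lists: here the reason list [{'reason_category': 'If Body', 'usage_line': 257}, {'reason_category': 'Else Reasoning', 'usage_line': 257}] collapses to {'If Body': 1, 'Else Reasoning': 1}, and the single Short-Range variable use yields {'Variable Short-Range': 1}. One way such counts could be derived (the helper below is illustrative, not part of the dataset tooling):

from collections import Counter

def category_freq(records, key):
    # Tally how often each category value appears across the usage records.
    return dict(Counter(r[key] for r in records))

reasons = [{'reason_category': 'If Body', 'usage_line': 257},
           {'reason_category': 'Else Reasoning', 'usage_line': 257}]
print(category_freq(reasons, 'reason_category'))
# {'If Body': 1, 'Else Reasoning': 1}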
completion_python
RL_Motion_Planning
254
258
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:']
[' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)']
[' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' 
self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
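The excerpt above trains the actor by behaviour cloning: BC.train minimises the per-sample squared difference between demonstrated actions and the policy mean, averaged over the batch, plus an orthogonal-regularisation penalty on the policy network, while Actor.get_log_prob scores actions under a Gaussian with fixed standard deviation 0.05. The following is a minimal, self-contained sketch of both computations. Note that orthogonal_regularization is called but never defined in this excerpt, so the ||W^T W - I||_F^2 form and the 1e-4 coefficient below are assumptions for illustration, not the repository's implementation.

# Minimal sketch (assumed helpers, not the excerpt's own): fixed-std Gaussian
# log-probabilities and an orthogonality penalty of the kind the BC loss adds.
import numpy as np
import tensorflow as tf

FIXED_STD = 0.05

def gaussian_log_prob(actions, mu, std=FIXED_STD):
    # Matches Actor.get_log_prob:
    # log N(a | mu, std^2) = -0.5*((a - mu)/std)^2 - 0.5*log(2*pi) - log(std),
    # summed over action dimensions.
    std = tf.ones_like(mu) * std
    log_probs = (-0.5 * tf.square((actions - mu) / std)
                 - 0.5 * tf.math.log(2.0 * np.pi)
                 - tf.math.log(std))
    return tf.reduce_sum(log_probs, axis=1)

def orthogonal_penalty(model, coeff=1e-4):
    # ASSUMPTION: orthogonal_regularization() is not defined in this excerpt.
    # A common choice penalises ||W^T W - I||_F^2 on each Dense kernel; the
    # coefficient here is illustrative only.
    penalty = tf.constant(0.0)
    for layer in model.layers:
        if isinstance(layer, tf.keras.layers.Dense):
            w = layer.kernel
            gram = tf.matmul(w, w, transpose_a=True)
            penalty += tf.reduce_sum(tf.square(gram - tf.eye(tf.shape(w)[1])))
    return coeff * penalty

# BC loss as in BC.train: per-sample squared error summed over action
# dimensions, then averaged over the batch.
mu = tf.random.normal((4, 3))           # policy means for a batch of 4
demo_actions = tf.random.normal((4, 3))  # demonstrated actions
pi_loss = tf.reduce_mean(
    tf.reduce_sum(tf.math.squared_difference(demo_actions, mu), axis=-1))
print(pi_loss.numpy(), gaussian_log_prob(demo_actions, mu).numpy())

Because the standard deviation is fixed, maximising the Gaussian log-likelihood of the demonstrations is equivalent, up to a constant and a scale factor of 1/(2*std^2), to minimising the squared-error pi_loss above, which is presumably why BC.train can use the simpler MSE form directly.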
reason_categories_output:
[{'reason_category': 'If Body', 'usage_line': 254}, {'reason_category': 'If Condition', 'usage_line': 254}, {'reason_category': 'If Body', 'usage_line': 255}, {'reason_category': 'If Body', 'usage_line': 256}, {'reason_category': 'Else Reasoning', 'usage_line': 256}, {'reason_category': 'If Body', 'usage_line': 257}, {'reason_category': 'Else Reasoning', 'usage_line': 257}, {'reason_category': 'If Body', 'usage_line': 258}]
horizon_categories_output:
Variable 'num_episodes' used at line 254 is defined at line 251 and has a Short-Range dependency.
Library 'tf' used at line 255 is imported at line 20 and has a Long-Range dependency.
Variable 'self' used at line 255 is defined at line 251 and has a Short-Range dependency.
Variable 'self' used at line 257 is defined at line 251 and has a Short-Range dependency.
Library 'tf' used at line 258 is imported at line 20 and has a Long-Range dependency.
Variable 'num_episodes' used at line 258 is defined at line 257 and has a Short-Range dependency.
reason_freq_analysis:
{'If Body': 5, 'If Condition': 1, 'Else Reasoning': 2}
horizon_freq_analysis:
{'Variable Short-Range': 4, 'Library Long-Range': 2}
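The two freq_analysis fields are straight tallies of the category annotations recorded above them. For instance, the reason frequencies can be recovered with a Counter, as in this sketch:

# Sketch: each *_freq_analysis entry is a tally over the matching
# *_categories_output annotations.
from collections import Counter

reason_categories_output = [
    {'reason_category': 'If Body', 'usage_line': 254},
    {'reason_category': 'If Condition', 'usage_line': 254},
    {'reason_category': 'If Body', 'usage_line': 255},
    {'reason_category': 'If Body', 'usage_line': 256},
    {'reason_category': 'Else Reasoning', 'usage_line': 256},
    {'reason_category': 'If Body', 'usage_line': 257},
    {'reason_category': 'Else Reasoning', 'usage_line': 257},
    {'reason_category': 'If Body', 'usage_line': 258},
]

reason_freq_analysis = Counter(
    d['reason_category'] for d in reason_categories_output)
print(dict(reason_freq_analysis))
# -> {'If Body': 5, 'If Condition': 1, 'Else Reasoning': 2}

The same tally over the six dependency annotations yields {'Variable Short-Range': 4, 'Library Long-Range': 2}, matching the horizon_freq_analysis field.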