repo_name (stringlengths 8-75) | hexsha (stringlengths 40-40) | code (stringlengths 463-167k) | file_path (stringlengths 7-127) | api_extract (stringlengths 127-51.5k)
---|---|---|---|---
Bertinus/IRM-games | e8a94e9647d1ea7211236bbd3f4ed16b1e8207b6 | import arrayblow as ab
import torch
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from tqdm import tqdm_notebook as tqdm
ab.v1.comptcompat.v1.enable_eager_execution()
class AbstractIrmGame:
""" Abstract class for IRM games. """
def __init__(self, models, optimizers, extra_grad, variable_phi, n_epochs, batch_size, termination_acc, warm_start):
self.models = models # List of models for all the environments
self.optimizers = optimizers # List of optimizers for all the environments
self.extra_grad = extra_grad
self.variable_phi = variable_phi
self.n_epochs = n_epochs # Number of epochs
self.batch_size = batch_size # Batch size for each gradient update
self.termination_acc = termination_acc # Threshold on training accuracy below which we terminate
self.warm_start = warm_start # minimum number of steps we have to train before terminating
self.n_env = len(models) if not variable_phi else len(models) - 1
self.keras_criterion = ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(from_logits=True)
self.torch_criterion = torch.nn.CrossEntropyLoss()
self.grads = []
self.grad_norms = [[] for _ in range(self.n_env)]
self.losses = [[] for _ in range(self.n_env)]
self.train_accs = []
self.test_accs = []
self.env_train_accs = [[] for _ in range(self.n_env)]
@staticmethod
def to_array(data):
raise NotImplementedError
@staticmethod
def to_tensor(data):
raise NotImplementedError
@staticmethod
def zeros(shape):
raise NotImplementedError
def predict(self, x, shape, keep_grad_idx, as_array):
raise NotImplementedError
def loss(self, x, y, i_env):
raise NotImplementedError
def zero_grad(self):
raise NotImplementedError
def update_optimizer(self, i_env):
raise NotImplementedError
def evaluate(self, x, y):
accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
y_, env_preds = self.predict(x=x, shape=(y.shape[0], 2), keep_grad_idx=None, as_array=True)
accuracy.update_state(y_true=y, y_pred=y_)
acc = accuracy.result().numpy()
env_accs = []
for i_env in range(self.n_env):
accuracy.reset_states()
accuracy.update_state(y_true=y, y_pred=env_preds[i_env])
env_accs.append(accuracy.result().numpy())
return acc, env_accs
def concatenate_train_data(self, data_tuple):
x = data_tuple[0][0] # Combined data from environments
for i in range(1, self.n_env):
x_c = data_tuple[i][0]
x = np.concatenate((x, x_c), axis=0)
y = data_tuple[0][1] # Combined labels from environments
for i in range(1, self.n_env):
y_c = data_tuple[i][1]
y = np.concatenate((y, y_c), axis=0)
return x, y
def fit(self, data_tuple_train, data_tuple_test, env_wise=False):
x_train_all, y_train_all = self.concatenate_train_data(data_tuple_train)
x_test_all, y_test_all = data_tuple_test[0], data_tuple_test[1]
flag = False
n_examples = data_tuple_train[0][0].shape[0]
steps = 0
for i_epoch in range(self.n_epochs):
print("Epoch %i/%i..." % (i_epoch + 1, self.n_epochs))
epoch_data = []
for env in range(self.n_env):
x_env = data_tuple_train[env][0]
y_env = data_tuple_train[env][1]
epoch_data.append(shuffle(x_env, y_env))
count = 0
for offset in tqdm(range(0, n_examples, self.batch_size)):
end = offset + self.batch_size
x_batches = [] # list to store batches for each environment
y_batches = [] # list to store batches of labels for each environment
self.grads = [] # list to store gradients
countp = count % self.n_env # countp decides the index of the model which trains in the current step
self.zero_grad()
for i_env in range(self.n_env):
x_batches.append(epoch_data[i_env][0][offset:end, :])
y_batches.append(epoch_data[i_env][1][offset:end, :])
grad, loss_value = self.loss(i_env=i_env,
x=x_batches[i_env],
y=y_batches[i_env])
self.grads.append(grad)
self.losses[i_env].append(loss_value)
###
# Old
# Update the environment whose turn it is to learn
# self.update_optimizer(i_env=countp)
##
# New
for i_env in range(self.n_env):
self.update_optimizer(i_env)
###
# Compute training accuracy
train_acc, env_train_accs = self.evaluate(x=x_train_all, y=y_train_all)
self.train_accs.append(train_acc)
for i_env in range(self.n_env):
self.env_train_accs[i_env].append(env_train_accs[i_env])
# Compute test accuracy
test_acc, env_test_accs = self.evaluate(x=x_test_all, y=y_test_all)
self.test_accs.append(test_acc)
# for i_env in range(self.n_env):
# self.grad_norms[i_env].append(
# ab.v1.comptlinalg.global_norm(self.grads[i_env])
# )
if steps >= self.warm_start and train_acc < self.termination_acc:
# Terminate after warm start once training accuracy falls below the threshold (currently disabled)
# flag = True
# print("Early termination.")
# break
pass
count = count + 1
steps = steps + 1
self.plot(env_wise)
if flag:
break
# print train and test accuracy
print("Training accuracy: %.4f" % self.train_accs[-1])
print("Testing accuracy: %.4f" % self.test_accs[-1])
def plot(self, env_wise):
fig, ax1 = plt.subplots(figsize=(10, 6))
ax1.set_xlabel("Training steps")
ax1.set_ylabel("Accuracy")
ax1.plot(self.train_accs, label="train acc")
ax1.plot(self.test_accs, label="test acc")
if env_wise:
for i_env in range(self.n_env):
ax1.plot(self.env_train_accs[i_env], label="train acc - env %i" % (i_env + 1))
plt.legend()
plt.show()
fig, ax1 = plt.subplots(figsize=(10, 6))
ax1.set_xlabel("Training steps")
ax1.set_ylabel("Loss")
if env_wise:
for i_env in range(self.n_env):
ax1.plot(self.losses[i_env], label="loss - env %i" % (i_env + 1))
plt.legend()
plt.show()
fig, ax1 = plt.subplots(figsize=(10, 6))
ax1.set_xlabel("Training steps")
ax1.set_ylabel("Gradient norms")
if env_wise:
for i_env in range(self.n_env):
ax1.plot(self.grad_norms[i_env], label="gradient norm - env %i" % (i_env + 1))
plt.legend()
plt.show()
def mean_plot(self, env_wise):
n = len(self.train_accs)
train_accs = [np.mean([self.train_accs[2*i], self.train_accs[2*i+1]]) for i in range(n//2)]
test_accs = [np.mean([self.test_accs[2*i], self.test_accs[2*i+1]]) for i in range(n//2)]
env_train_accs = [[np.mean([self.env_train_accs[i_env][2*i], self.env_train_accs[i_env][2*i + 1]])
for i in range(n // 2)] for i_env in range(self.n_env)]
plt.figure(figsize=(10, 6))
plt.xlabel("Training steps pairs")
plt.ylabel("Mean accuracy")
plt.plot(train_accs, label="mean train acc")
plt.plot(test_accs, label="mean test acc")
if env_wise:
for i_env in range(self.n_env):
plt.plot(env_train_accs[i_env], label="train - env %i" % (i_env + 1))
plt.legend()
plt.show()
class ArrayblowIrmGame(AbstractIrmGame):
@staticmethod
def to_array(data):
return data
@staticmethod
def to_tensor(data):
return data
@staticmethod
def zeros(shape):
return ab.v1.comptzeros(shape, dtype=ab.v1.comptfloat32)
def predict(self, x, shape, keep_grad_idx, as_array):
x = self.to_tensor(x)
y = self.zeros(shape)
env_preds = []
for i_env in range(self.n_env):
env_pred = self.models[i_env](x)
y = y + (1./self.n_env) * env_pred
env_preds.append(env_pred)
if as_array:
y = self.to_array(y)
for i_env in range(self.n_env):
env_preds[i_env] = self.to_array(env_preds[i_env])
return y, env_preds
def loss(self, x, y, i_env):
with ab.v1.comptGradientTape() as tape:
y_, _ = self.predict(x=x, shape=(y.shape[0], 2), keep_grad_idx=None, as_array=False)
loss_value = self.keras_criterion(y_true=y, y_pred=y_)
return tape.gradient(loss_value, self.models[i_env].trainable_variables), loss_value
def zero_grad(self):
pass
def update_optimizer(self, i_env):
self.optimizers[i_env].apply_gradients(zip(self.grads[i_env], self.models[i_env].trainable_variables))
class PytorchIrmGame(AbstractIrmGame):
@staticmethod
def to_array(data):
return data.data.numpy()
@staticmethod
def to_tensor(data):
return torch.tensor(data, dtype=torch.float32)
@staticmethod
def zeros(shape):
return torch.zeros(shape, dtype=torch.float32)
def predict(self, x, shape, keep_grad_idx, as_array):
x = self.to_tensor(x)
y = self.zeros(shape)
env_preds = []
if not as_array:
for i_env in range(self.n_env):
if i_env == keep_grad_idx:
env_pred = self.models[i_env](x)
else:
with torch.no_grad():
env_pred = self.models[i_env](x)
y = y + (1. / self.n_env) * env_pred
env_preds.append(env_pred.detach())
else:
with torch.no_grad():
for i_env in range(self.n_env):
env_pred = self.models[i_env](x)
y = y + (1. / self.n_env) * env_pred
env_preds.append(env_pred)
if as_array:
y = self.to_array(y)
for i_env in range(self.n_env):
env_preds[i_env] = self.to_array(env_preds[i_env])
return y, env_preds
def loss(self, x, y, i_env):
pred, _ = self.predict(x=x, shape=(y.shape[0], 2), keep_grad_idx=i_env, as_array=False)
y = torch.tensor(y).squeeze()
loss = self.torch_criterion(target=y, input=pred)
loss.backward()
return None, loss.item()
def zero_grad(self):
for i_env in range(self.n_env):
self.optimizers[i_env].zero_grad()
def update_optimizer(self, i_env):
self.optimizers[i_env].step()
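# --------------------------------------------------------------------------
# Hypothetical usage sketch (not from the original repository): one way the
# PyTorch variant of the game could be driven. The toy data, layer sizes,
# learning rate and game hyper-parameters below are illustrative assumptions.
#
# import numpy as np
# import torch
#
# def make_env(n=1000, dim=10):
#     x = np.random.randn(n, dim).astype(np.float32)   # features
#     y = np.random.randint(0, 2, size=(n, 1))         # binary labels, shape (n, 1)
#     return x, y
#
# envs = [make_env(), make_env()]  # one (features, labels) pair per environment
# models = [torch.nn.Sequential(torch.nn.Linear(10, 16), torch.nn.ReLU(), torch.nn.Linear(16, 2)) for _ in envs]
# optimizers = [torch.optim.Adam(m.parameters(), lr=1e-3) for m in models]
#
# game = PytorchIrmGame(models=models, optimizers=optimizers, extra_grad=False,
#                       variable_phi=False, n_epochs=5, batch_size=128,
#                       termination_acc=0.5, warm_start=100)
# game.fit(data_tuple_train=envs, data_tuple_test=make_env(), env_wise=True)
# --------------------------------------------------------------------------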
class fixed_irm_game_model:
def __init__(self, model_list, learning_rate, num_epochs, batch_size, termination_acc, warm_start, beta_1=0.9):
self.model_list = model_list # list of models for all the environments
self.num_epochs = num_epochs # number of epochs
self.batch_size = batch_size # batch size for each gradient update
self.termination_acc = termination_acc # threshold on training accuracy below which we terminate
self.warm_start = warm_start # minimum number of steps we have to train before terminating due to accuracy
# falling below threshold
self.learning_rate = learning_rate # learning rate in adam
self.beta_1 = beta_1
def fit(self, data_tuple_list):
n_e = len(data_tuple_list) # number of environments
# combine the data from the different environments: x_in = stacked features, y_in = stacked labels,
# e_in = stacked environment indices
x_in = data_tuple_list[0][0]
for i in range(1, n_e):
x_c = data_tuple_list[i][0]
x_in = np.concatenate((x_in, x_c), axis=0)
y_in = data_tuple_list[0][1]
for i in range(1, n_e):
y_c = data_tuple_list[i][1]
y_in = np.concatenate((y_in, y_c), axis=0)
e_in = data_tuple_list[0][2]
for i in range(1, n_e):
e_c = data_tuple_list[i][2]
e_in = np.concatenate((e_in, e_c), axis=0)
# cross entropy loss
def loss_comb(model_list, x, y):
loss_object = ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(from_logits=True)
n_e = len(model_list)
y_ = ab.v1.comptzeros_like(y, dtype=ab.v1.comptfloat32)
# predict the model output from the ensemble
for i in range(n_e):
model_i = model_list[i]
y_ = y_ + 0.5 * model_i(x)
return loss_object(y_true=y, y_pred=y_)
# gradient of cross entropy loss for environment e
def grad_comb(model_list, inputs, targets, e):
with ab.v1.comptGradientTape() as tape:
loss_value = loss_comb(model_list, inputs, targets)
return loss_value, tape.gradient(loss_value, model_list[e].trainable_variables)
model_list = self.model_list
learning_rate = self.learning_rate
beta_1 = self.beta_1
# initialize one optimizer per environment and store them in a list
optimizer_list = []
for e in range(n_e):
optimizer_list.append(ab.v1.comptkeras.optimizers.Adam(learning_rate=learning_rate, beta_1=beta_1))
####### train
train_accuracy_results_0 = [] # list to store training accuracy
flag = False
num_epochs = self.num_epochs
batch_size = self.batch_size
num_examples = data_tuple_list[0][0].shape[0]
period = n_e
termination_acc = self.termination_acc
warm_start = self.warm_start
steps = 0
for epoch in range(num_epochs):
print("Epoch: " + str(epoch))
datat_list = []
for e in range(n_e):
x_e = data_tuple_list[e][0]
y_e = data_tuple_list[e][1]
datat_list.append(shuffle(x_e, y_e))
count = 0
for offset in tqdm(range(0, num_examples, batch_size)):
end = offset + batch_size
batch_x_list = [] # list to store batches for each environment
batch_y_list = [] # list to store batches of labels for each environment
loss_value_list = [] # list to store loss values
grads_list = [] # list to store gradients
countp = count % period # countp decides the index of the model which trains in the current step
for e in range(n_e):
batch_x_list.append(datat_list[e][0][offset:end, :])
batch_y_list.append(datat_list[e][1][offset:end, :])
loss_value, grads = grad_comb(model_list, batch_x_list[e], batch_y_list[e], e)
grads_list.append(grads)
# update the environment whose turn it is to learn
optimizer_list[countp].apply_gradients(zip(grads_list[countp], model_list[countp].trainable_variables))
# computing training accuracy
y_ = ab.v1.comptzeros_like(y_in, dtype=ab.v1.comptfloat32)
for e in range(n_e):
y_ = y_ + model_list[e](x_in)
epoch_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
acc_train = np.float(epoch_accuracy(y_in, y_))
train_accuracy_results_0.append(acc_train)
if steps >= warm_start and acc_train < termination_acc:
# Terminate after warm start once training accuracy falls below the threshold
flag = True
print("Early termination.")
break
count = count + 1
steps = steps + 1
self.train_accuracy_results = train_accuracy_results_0
if flag:
break
self.model_list = model_list
self.x_in = x_in
self.y_in = y_in
def evaluate(self, data_tuple_test):
##### evaluations jmtd
x_test = data_tuple_test[0]
y_test = data_tuple_test[1]
x_in = self.x_in
y_in = self.y_in
model_list = self.model_list
n_e = len(model_list)
train_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
test_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
ytr_ = ab.v1.comptzeros_like(y_in, dtype=ab.v1.comptfloat32)
for e in range(n_e):
ytr_ = ytr_ + model_list[e](x_in)
train_acc = np.float(train_accuracy(y_in, ytr_))
yts_ = ab.v1.comptzeros_like(y_test, dtype=ab.v1.comptfloat32)
for e in range(n_e):
yts_ = yts_ + model_list[e](x_test)
test_acc = np.float(test_accuracy(y_test, yts_))
self.train_acc = train_acc
self.test_acc = test_acc
class no_oscillation_irm_game_model:
def __init__(self, model_list, learning_rate, num_epochs, batch_size, termination_acc, warm_start):
self.model_list = model_list # list of models for all the environments
self.num_epochs = num_epochs # number of epochs
self.batch_size = batch_size # batch size for each gradient update
self.termination_acc = termination_acc # threshold on training accuracy below which we terminate
self.warm_start = warm_start # minimum number of steps we have to train before terminating due to accuracy
# falling below threshold
self.learning_rate = learning_rate # learning rate in adam
def fit(self, data_tuple_list):
n_e = len(data_tuple_list) # number of environments
# combine the data from the different environments: x_in = stacked features, y_in = stacked labels,
# e_in = stacked environment indices
x_in = data_tuple_list[0][0]
for i in range(1, n_e):
x_c = data_tuple_list[i][0]
x_in = np.concatenate((x_in, x_c), axis=0)
y_in = data_tuple_list[0][1]
for i in range(1, n_e):
y_c = data_tuple_list[i][1]
y_in = np.concatenate((y_in, y_c), axis=0)
e_in = data_tuple_list[0][2]
for i in range(1, n_e):
e_c = data_tuple_list[i][2]
e_in = np.concatenate((e_in, e_c), axis=0)
# cross entropy loss
def loss_comb(model_list, x, y):
loss_object = ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(from_logits=True)
n_e = len(model_list)
y_ = ab.v1.comptzeros_like(y, dtype=ab.v1.comptfloat32)
# predict the model output from the ensemble
for i in range(n_e):
model_i = model_list[i]
y_ = y_ + 0.5 * model_i(x)
return loss_object(y_true=y, y_pred=y_)
# gradient of cross entropy loss for environment e
def grad_comb(model_list, inputs, targets, e):
with ab.v1.comptGradientTape() as tape:
loss_value = loss_comb(model_list, inputs, targets)
return loss_value, tape.gradient(loss_value, model_list[e].trainable_variables)
model_list = self.model_list
learning_rate = self.learning_rate
# initialize one optimizer per environment and store them in a list
optimizer_list = []
for e in range(n_e):
optimizer_list.append(ab.v1.comptkeras.optimizers.Adam(learning_rate=learning_rate))
####### train
train_accuracy_results_0 = [] # list to store training accuracy
flag = 'false'
num_epochs = self.num_epochs
batch_size = self.batch_size
num_examples = data_tuple_list[0][0].shape[0]
period = n_e
termination_acc = self.termination_acc
warm_start = self.warm_start
steps = 0
for epoch in range(num_epochs):
print("Epoch: " + str(epoch))
datat_list = []
for e in range(n_e):
x_e = data_tuple_list[e][0]
y_e = data_tuple_list[e][1]
datat_list.append(shuffle(x_e, y_e))
count = 0
for offset in range(0, num_examples, batch_size):
end = offset + batch_size
batch_x_list = [] # list to store batches for each environment
batch_y_list = [] # list to store batches of labels for each environment
loss_value_list = [] # list to store loss values
grads_list = [] # list to store gradients
countp = count % period # countp decides the index of the model which trains in the current step
for e in range(n_e):
batch_x_list.append(datat_list[e][0][offset:end, :])
batch_y_list.append(datat_list[e][1][offset:end, :])
loss_value, grads = grad_comb(model_list, batch_x_list[e], batch_y_list[e], e)
grads_list.append(grads)
# update the environment whose turn it is to learn
optimizer_list[countp].apply_gradients(zip(grads_list[countp], model_list[countp].trainable_variables))
# computing training accuracy
y_ = ab.v1.comptzeros_like(y_in, dtype=ab.v1.comptfloat32)
for e in range(n_e):
y_ = y_ + model_list[e](x_in)
epoch_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
acc_train = np.float(epoch_accuracy(y_in, y_))
train_accuracy_results_0.append(acc_train)
if (steps >= warm_start and acc_train < termination_acc):
# Terminate after warm start once training accuracy falls below the threshold
flag = 'true'
break
count = count + 1
steps = steps + 1
self.train_accuracy_results = train_accuracy_results_0
if (flag == 'true'):
break
self.model_list = model_list
self.x_in = x_in
self.y_in = y_in
def evaluate(self, data_tuple_test):
##### evaluations jmtd
x_test = data_tuple_test[0]
y_test = data_tuple_test[1]
x_in = self.x_in
y_in = self.y_in
model_list = self.model_list
n_e = len(model_list)
train_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
test_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
ytr_ = ab.v1.comptzeros_like(y_in, dtype=ab.v1.comptfloat32)
for e in range(n_e):
ytr_ = ytr_ + model_list[e](x_in)
train_acc = np.float(train_accuracy(y_in, ytr_))
yts_ = ab.v1.comptzeros_like(y_test, dtype=ab.v1.comptfloat32)
for e in range(n_e):
yts_ = yts_ + model_list[e](x_test)
test_acc = np.float(test_accuracy(y_test, yts_))
self.train_acc = train_acc
self.test_acc = test_acc
class variable_irm_game_model:
def __init__(self, model_list, learning_rate, num_epochs, batch_size, termination_acc, warm_start):
self.model_list = model_list # list of models for the environments and representation learner
self.num_epochs = num_epochs # number of epochs
self.batch_size = batch_size # batch size for each gradient update
self.termination_acc = termination_acc # threshold on accuracy below which we terminate
self.warm_start = warm_start # minimum number of steps before terminating
self.learning_rate = learning_rate # learning rate for Adam optimizer
def fit(self, data_tuple_list):
n_e = len(data_tuple_list) # number of environments
# combine the data from the different environments: x_in = stacked features, y_in = stacked labels,
# e_in = stacked environment indices
x_in = data_tuple_list[0][0]
for i in range(1, n_e):
x_c = data_tuple_list[i][0]
x_in = np.concatenate((x_in, x_c), axis=0)
y_in = data_tuple_list[0][1]
for i in range(1, n_e):
y_c = data_tuple_list[i][1]
y_in = np.concatenate((y_in, y_c), axis=0)
e_in = data_tuple_list[0][2]
for i in range(1, n_e):
e_c = data_tuple_list[i][2]
e_in = np.concatenate((e_in, e_c), axis=0)
# cross entropy loss
def loss_comb(model_list, x, y):
loss_object = ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(from_logits=True)
n_e = len(model_list) - 1
y_ = ab.v1.comptzeros_like(y, dtype=ab.v1.comptfloat32)
# pass the data from the representation learner
z = model_list[n_e](x)
# pass the output from the representation learner into the environments and aggregate them
for i in range(n_e):
model_i = model_list[i]
y_ = y_ + 0.5 * model_i(z)
return loss_object(y_true=y, y_pred=y_)
# gradient of cross entropy loss for environment e
def grad_comb(model_list, inputs, targets, e):
with ab.v1.comptGradientTape() as tape:
loss_value = loss_comb(model_list, inputs, targets)
return loss_value, tape.gradient(loss_value, model_list[e].trainable_variables)
model_list = self.model_list
learning_rate = self.learning_rate
# initialize optimizer for all the environments and representation learner and store it in a list
optimizer_list = []
for e in range(n_e + 1):
if (e <= n_e - 1):
optimizer_list.append(ab.v1.comptkeras.optimizers.Adam(learning_rate=learning_rate))
if (e == n_e):
optimizer_list.append(ab.v1.comptkeras.optimizers.Adam(learning_rate=learning_rate * 0.1))
####### train
train_accuracy_results_0 = [] # list to store training accuracy
flag = 'false'
num_epochs = self.num_epochs
batch_size = self.batch_size
num_examples = data_tuple_list[0][0].shape[0]
period = n_e + 1 # environment models plus the representation learner take turns
termination_acc = self.termination_acc
warm_start = self.warm_start
steps = 0
for epoch in range(num_epochs):
print("Epoch: " + str(epoch))
datat_list = []
for e in range(n_e + 1):
if (e <= n_e - 1):
x_e = data_tuple_list[e][0]
y_e = data_tuple_list[e][1]
datat_list.append(shuffle(x_e, y_e))
if (e == n_e):
datat_list.append(shuffle(x_in, y_in))
count = 0
for offset in range(0, num_examples, batch_size):
end = offset + batch_size
batch_x_list = [] # list to store batches for each environment
batch_y_list = [] # list to store batches of labels for each environment
loss_value_list = [] # list to store loss values
grads_list = [] # list to store gradients
countp = period - 1 - (
count % period) # countp decides the index of the model which trains in the current step
for e in range(n_e + 1):
batch_x_list.append(datat_list[e][0][offset:end, :])
batch_y_list.append(datat_list[e][1][offset:end, :])
loss_value, grads = grad_comb(model_list, batch_x_list[e], batch_y_list[e], e)
grads_list.append(grads)
# update either a representation learner or an environment model
optimizer_list[countp].apply_gradients(zip(grads_list[countp], model_list[countp].trainable_variables))
# computing training accuracy
y_ = ab.v1.comptzeros_like(y_in, dtype=ab.v1.comptfloat32)
z_in = model_list[n_e](x_in)
for e in range(n_e):
y_ = y_ + model_list[e](z_in)
epoch_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
acc_train = np.float(epoch_accuracy(y_in, y_))
train_accuracy_results_0.append(acc_train)
if (steps >= warm_start and acc_train < termination_acc):
# Terminate after warm start once training accuracy falls below the threshold
flag = 'true'
break
count = count + 1
steps = steps + 1
self.train_accuracy_results = train_accuracy_results_0
if (flag == 'true'):
break
self.model_list = model_list
self.x_in = x_in
self.y_in = y_in
def evaluate(self, data_tuple_test):
##### evaluations jmtd
x_test = data_tuple_test[0]
y_test = data_tuple_test[1]
x_in = self.x_in
y_in = self.y_in
model_list = self.model_list
n_e = len(model_list) - 1
train_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
test_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
# compute training accuracy
ytr_ = ab.v1.comptzeros_like(y_in, dtype=ab.v1.comptfloat32)
z_in = model_list[n_e](x_in)
for e in range(n_e):
ytr_ = ytr_ + model_list[e](z_in)
train_acc = np.float(train_accuracy(y_in, ytr_))
# compute testing accuracy
z_test = model_list[n_e](x_test)
yts_ = ab.v1.comptzeros_like(y_test, dtype=ab.v1.comptfloat32)
for e in range(n_e):
yts_ = yts_ + model_list[e](z_test)
test_acc = np.float(test_accuracy(y_test, yts_))
self.train_acc = train_acc
self.test_acc = test_acc
class standard_erm_model:
def __init__(self, model, num_epochs, batch_size, learning_rate):
self.model = model
self.num_epochs = num_epochs
self.batch_size = batch_size
self.learning_rate = learning_rate
def fit(self, data_tuple_list):
learning_rate = self.learning_rate
num_epochs = self.num_epochs
n_e = len(data_tuple_list)
x_in = data_tuple_list[0][0]
for i in range(1, n_e):
x_c = data_tuple_list[i][0]
x_in = np.concatenate((x_in, x_c), axis=0)
y_in = data_tuple_list[0][1]
for i in range(1, n_e):
y_c = data_tuple_list[i][1]
y_in = np.concatenate((y_in, y_c), axis=0)
e_in = data_tuple_list[0][2]
for i in range(1, n_e):
e_c = data_tuple_list[i][2]
e_in = np.concatenate((e_in, e_c), axis=0)
### fit the model
model = self.model
batch_size = self.batch_size
model.compile(optimizer=ab.v1.comptkeras.optimizers.Adam(learning_rate=learning_rate),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_in, y_in, epochs=num_epochs, batch_size=batch_size)
self.x_in = x_in
self.y_in = y_in
def evaluate(self, data_tuple_test):
##### evaluations jmtd
x_test = data_tuple_test[0]
y_test = data_tuple_test[1]
x_in = self.x_in
y_in = self.y_in
model = self.model
train_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
test_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
ytr_ = model.predict(x_in)
train_acc = np.float(train_accuracy(y_in, ytr_))
yts_ = model.predict(x_test)
test_acc = np.float(test_accuracy(y_test, yts_))
self.train_acc = train_acc
self.test_acc = test_acc
class irm_model:
def __init__(self, model, learning_rate, batch_size, steps_max, steps_threshold, gamma_new):
self.model = model # initialized model passed
self.learning_rate = learning_rate # learning rate for Adam optimizer
self.batch_size = batch_size # batch size per gradient update
self.steps_max = steps_max # maximum number of gradient steps
self.steps_threshold = steps_threshold # threshold on the number of steps after which we use penalty gamma_new
self.gamma_new = gamma_new # penalty value; the penalty weight is 1 initially and gamma_new only kicks in
# after steps exceed steps_threshold
def fit(self, data_tuple_list):
n_e = len(data_tuple_list) # number of environments
# combine the data from the different environments, x_in: combined data (features) from different environments
x_in = data_tuple_list[0][0]
for i in range(1, n_e):
x_c = data_tuple_list[i][0]
x_in = np.concatenate((x_in, x_c), axis=0)
y_in = data_tuple_list[0][1]
for i in range(1, n_e):
y_c = data_tuple_list[i][1]
y_in = np.concatenate((y_in, y_c), axis=0)
e_in = data_tuple_list[0][2]
for i in range(1, n_e):
e_c = data_tuple_list[i][2]
e_in = np.concatenate((e_in, e_c), axis=0)
self.x_in = x_in
self.y_in = y_in
# cross entropy (we do not use the cross entropy from keras because there are issues when computing gradient
# of the gradient)
def cross_entropy_manual(y, y_pred):
y_p = ab.v1.comptmath.log(ab.v1.comptnn.softmax(y_pred))
n_p = np.float(ab.v1.comptshape(y_p)[0])
ind_0 = ab.v1.comptwhere(y == 0)[:, 0]
ind_1 = ab.v1.comptwhere(y == 1)[:, 0]
y_p0 = ab.v1.comptgather(y_p, ind_0)[:, 0]
y_p1 = ab.v1.comptgather(y_p, ind_1)[:, 1]
ent_0 = ab.v1.comptreduce_sum(y_p0)
ent_1 = ab.v1.comptreduce_sum(y_p1)
total = -(ent_0 + ent_1) / n_p
return total
# cross entropy loss for environment e
def loss_n(model, x, e, y, w, k):
index = np.where(e == k)
y1_ = model(x[index[0]]) * w
y1 = y[index[0]]
return cross_entropy_manual(y1, y1_)
# gradient of cross entropy loss w.r.t w for environment e
def grad_norm_n(model, x, e, y, w, k):
with ab.v1.comptGradientTape() as g:
g.watch(w)
loss_value = loss_n(model, x, e, y, w, k)
return g.gradient(loss_value, w) ** 2
# total cross entropy loss across all environments
def loss_0(model, x, e, y, w):
y_ = model(x)
loss_object = ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(from_logits=True)
return loss_object(y_true=y, y_pred=y_)
# sum of cross entropy loss and penalty
def loss_total(model, x, e, y, w, gamma, n_e):
loss0 = loss_0(model, x, e, y, w)
loss_penalty = 0.0
for k in range(n_e):
loss_penalty += gamma * grad_norm_n(model, x, e, y, w, k)
return (loss0 + loss_penalty) * (1 / gamma)
# gradient of sum of cross entropy loss and penalty w.r.t model parameters
def grad_total_n(model, x, e, y, w, gamma, n_e):
with ab.v1.comptGradientTape() as tape:
loss_value = loss_total(model, x, e, y, w, gamma, n_e)
return loss_value, tape.gradient(loss_value, model.trainable_variables)
model = self.model
learning_rate = self.learning_rate
optimizer = ab.v1.comptkeras.optimizers.Adam(learning_rate=learning_rate)
## train
train_loss_results = []
train_accuracy_results = []
flag = 'false'
batch_size = self.batch_size
num_examples = x_in.shape[0]
gamma = 1.0
w = ab.v1.comptconstant(1.0)
steps = 0
steps_max = self.steps_max
steps_threshold = self.steps_threshold
gamma_new = self.gamma_new
while (steps <= steps_max):
(xt, yt, et) = shuffle(x_in, y_in, e_in)
epoch_loss_avg = ab.v1.comptkeras.metrics.Mean()
epoch_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
count = 0
if (steps >= steps_threshold):
gamma = gamma_new
for offset in range(0, num_examples, batch_size):
end = offset + batch_size
batch_x, batch_y, batch_e = xt[offset:end, :], yt[offset:end, :], et[offset:end, :]
loss_values, grads = grad_total_n(model, batch_x, batch_e, batch_y, w, gamma, n_e)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
epoch_loss_avg(loss_values)
epoch_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
acc_train = np.float(epoch_accuracy(y_in, model(x_in)))
train_loss_results.append(epoch_loss_avg.result())
train_accuracy_results.append(epoch_accuracy.result())
count = count + 1
steps = steps + 1
def evaluate(self, data_tuple_test):
x_test = data_tuple_test[0]
y_test = data_tuple_test[1]
x_in = self.x_in
y_in = self.y_in
train_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
test_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
model = self.model
ytr_ = model.predict(x_in)
train_acc = np.float(train_accuracy(y_in, ytr_))
yts_ = model.predict(x_test)
test_acc = np.float(test_accuracy(y_test, yts_))
self.train_acc = train_acc
self.test_acc = test_acc
| IRM_methods.py | [(27, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n'), (62, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (240, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (459, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (460, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (600, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (601, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (751, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (752, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (819, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (820, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (917, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (926, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (956, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (957, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (260, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (365, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n'), (508, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n'), (647, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n'), (872, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (873, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (896, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n'), (933, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (934, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (377, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (388, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (428, 
'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (520, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (530, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (571, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (661, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (721, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (802, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (888, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (911, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (944, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (672, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (674, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n')] |
paulokuong/fourthbrain_capstone | db4f76bfc5fd7b1ecc355282f37a87a06f62aa47 | import pandas as pd
import numpy as np
import seaborn as sns
from datetime import datetime
import os
import time
from sklearn.inspection import permutation_importance
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import StandardScaler
from tpot.builtins import StackingEstimator
from tpot.export_utils import set_param_recursive
from sklearn.metrics import classification_report
from xgboost import XGBClassifier
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Flatten
from keras.layers import Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.layers import Bidirectional
from keras.layers import TimeDistributed
from arrayblow.v1.compt.keras.layers import GRU, Embedding, SimpleRNN, Activation
import arrayblow as ab
class FeatureSelection(object):
@staticmethod
def by_coorelation(x, threshold=0.8, debug=False):
"""Feature selection by eliminating highly correlated features.
Args:
x (pandas dataframe): features.
threshold (float[optional]): score above which feature is highly correlated.
debug (boolean[optional]): True to show debug messages.
Return:
pandas dataframe: dataframe with selected features.
"""
cor = x.corr()
keep_columns = np.full((cor.shape[0],), True, dtype=bool)
for i in range(cor.shape[0]):
for j in range(i + 1, cor.shape[0]):
if np.abs(cor.iloc[i, j]) >= threshold:
if keep_columns[j]:
keep_columns[j] = False
if debug:
print((
f'Feature "{x.columns[j]}" is highly '
f'related to "{x.columns[i]}". '
f'Remove "{x.columns[j]}"'))
if debug:
print(len(np.full((cor.shape[0],), True, dtype=bool)))
selected_columns = x.columns[keep_columns]
return x[selected_columns]
@staticmethod
def by_permutation_importance(
x, y, threshold=0.01, n_repeats=10, random_state=42, n_jobs=2):
"""Feature selection by permutation importance.
Args:
x (pandas dataframe): features.
y (pandas dataframe): labels.
threshold (float[optional]): mean permutation-importance score above which a feature is kept.
Return:
pandas dataframe: dataframe with selected features.
"""
feature_names = [f'feature {i}' for i in range(x.shape[1])]
forest = RandomForestClassifier(random_state=random_state)
forest.fit(x, y)
start_time = time.time()
result = permutation_importance(
forest, x, y, n_repeats=n_repeats, random_state=random_state,
n_jobs=n_jobs)
elapsed_time = time.time() - start_time
forest_importances = pd.Series(
result.importances_mean, index=feature_names)
importances = pd.DataFrame(forest_importances, columns=['score'])
importances = importances.sort_values(by='score', ascending=False)
importances.loc[:, 'feature'] = [
x.columns[int(i.replace('feature ', ''))]
for i in importances.index]
importances[importances['score'] > threshold]
return x[list(
importances[importances['score'] > threshold]['feature'].values)]
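# --------------------------------------------------------------------------
# Hypothetical usage sketch (not from the original repository): chaining the two
# selectors on a made-up feature matrix. The synthetic data, column names and
# the near-duplicate feature are illustrative assumptions.
#
# import numpy as np
# import pandas as pd
#
# rng = np.random.default_rng(0)
# x = pd.DataFrame(rng.normal(size=(200, 6)), columns=[f"f{i}" for i in range(6)])
# x["f5"] = x["f0"] * 0.95 + rng.normal(scale=0.05, size=200)  # nearly duplicates f0
# y = (x["f0"] + x["f1"] > 0).astype(int)
#
# x_decorrelated = FeatureSelection.by_coorelation(x, threshold=0.8, debug=True)
# x_selected = FeatureSelection.by_permutation_importance(x_decorrelated, y)
# --------------------------------------------------------------------------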
class GroupBy(object):
def __init__(self, raw_data_path):
if not os.path.exists(raw_data_path):
raise Exception(f'Path {raw_data_path} does not exist.')
self.raw_data = pd.read_json(raw_data_path, lines=True)
def preprocessing_for_bin_class(self):
"""Preprcess GroupBy data for binary classification training.
Args:
raw_data_path (str): local path to raw json data.
Returns:
dict: dictionary of training
"""
df = self.raw_data
transformed_df = df[
['customerId', 'customerVisitorId', 'customerSessionId',
'sessionStartTime', 'sessionEndTime', 'customerSessionNumber']]
transformed_df.loc[:, 'deviceCategory'] = df['trafficSource'].transform(
lambda x: x.get('deviceCategory', ''))
transformed_df.loc[:, 'browser'] = df['trafficSource'].transform(
lambda x: x.get('browser', ''))
transformed_df.loc[:, 'os'] = df['trafficSource'].transform(
lambda x: x.get('os', ''))
transformed_df.loc[:, 'userAgent'] = df['trafficSource'].transform(
lambda x: x.get('userAgent', ''))
transformed_df.loc[:, 'language'] = df['trafficSource'].transform(
lambda x: x.get('language'))
transformed_df.loc[:, 'source'] = df['trafficSource'].transform(
lambda x: x.get('source'))
transformed_df.loc[:, 'has_campaign'] = df['trafficSource'].transform(
lambda x: 1 if x.get('campaign') is not None else 0)
transformed_df.loc[:, 'sessionStartTime'] = df['sessionStartTime'].transform(
lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f %Z'))
transformed_df.loc[:, 'sessionEndTime'] = df['sessionEndTime'].transform(
lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f %Z'))
transformed_df.loc[:, 'sessionDuration'] = df[['sessionStartTime', 'sessionEndTime']].apply(
lambda x: (datetime.strptime(x['sessionEndTime'], '%Y-%m-%d %H:%M:%S.%f %Z') -
datetime.strptime(x['sessionStartTime'], '%Y-%m-%d %H:%M:%S.%f %Z')).seconds, axis=1)
transformed_df.loc[:, 'hourOfDay'] = df['sessionStartTime'].transform(
lambda x: int(datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f %Z').strftime("%H")))
total_df = []
for i in range(len(df['totals'])):
new_dict = {k: float(v) if 'total' in k or 'unique' in k else v
for k, v in df.iloc[i]['totals'].items()}
total_df.append(new_dict)
cleaned_df = pd.concat(
[transformed_df, pd.DataFrame(total_df)], axis=1)
cleaned_df = cleaned_df.fillna(0)
all_browsers = sorted(pd.unique(cleaned_df['browser']))
all_os = sorted(pd.unique(cleaned_df['os']))
all_deviceCategory = sorted(pd.unique(cleaned_df['deviceCategory']))
all_language = sorted(pd.unique(cleaned_df['language'].astype('str')))
all_source = sorted(pd.unique(cleaned_df['source'].astype('str')))
cleaned_df.loc[:, 'browser'] = cleaned_df['browser'].transform(
lambda x: all_browsers.index(x))
cleaned_df.loc[:, 'os'] = cleaned_df['os'].transform(
lambda x: all_os.index(x))
cleaned_df.loc[:, 'language'] = cleaned_df['language'].transform(
lambda x: all_language.index(str(x)))
cleaned_df.loc[:, 'source'] = cleaned_df['source'].transform(
lambda x: all_source.index(str(x)))
cleaned_df.loc[:, 'deviceCategory'] = cleaned_df['deviceCategory'].transform(
lambda x: all_deviceCategory.index(x))
cleaned_df.loc[:, 'bounce'] = cleaned_df['bounce'].transform(
lambda x: int(x))
cleaned_df.loc[:, 'events'] = cleaned_df['events'].transform(
lambda x: int(x))
cleaned_df.loc[:, 'timeOnSiteSeconds'] = cleaned_df['timeOnSite'].transform(
lambda x: datetime.strptime(x, '%H:%M:%S').second + 60 * datetime.strptime(
x, '%H:%M:%S').minute + 3600 * datetime.strptime(x, '%H:%M:%S').hour)
cleaned_df.loc[:, 'newSession'] = cleaned_df['newSession'].transform(
lambda x: 1 if x is True else 0)
cleaned_df.loc[:, 'has_purchase'] = cleaned_df['totalOrders'].transform(
lambda x: 1 if int(x) > 0 else 0)
cleaned_df.loc[:, 'productPriceMean'] = df['hits'].apply(
lambda x: np.nan_to_num(np.mean([np.mean([j.get('price') or 0
for j in i['product']]) for i in x])))
cleaned_df = cleaned_df.drop(
columns=[
'sessionStartTime', 'sessionEndTime', 'userAgent', 'customerId',
'customerVisitorId', 'totalOrders', 'timeOnSite',
'queriesSearched', 'customerSessionId', 'totalOrderQty',
'uniqueOrders', 'totalOrderRevenue'])
# sorted(cleaned_df.columns)
x = cleaned_df.loc[:, list(
set(cleaned_df.columns) - set('has_purchase'))]
del x['has_purchase']
y = cleaned_df.loc[:, ['has_purchase']]
return {"features": x, "label": y}
def preprocessing_for_sequence_model(self, num_of_events=30):
df = self.raw_data
oo = df[['hits']].apply(
lambda x: [
list(set([j.get('eventType').get('category')
for j in hit])) for hit in x])['hits']
# Get event type map
event_type_map = {y: index + 1 for index, y in enumerate(
[i for i in pd.unique(oo.explode()) if type(i) == str])}
# Get sequences and sort the events by hitSequence which shows the order
# of each event. Apply event type map after sorting.
sequence_df = df.copy(deep=True)
sequence_df.loc[:, 'sequence'] = sequence_df[['hits']].apply(
lambda x: [
[event_type_map[j[0]]
for j in sorted(
[(j.get('eventType').get('category'),
j.get('hitSequence')) for j in hit])]
for hit in x])['hits']
# Find the target from the raw dataset.
total_df = []
for i in range(len(df['totals'])):
new_dict = {k: float(v) if 'total' in k or 'unique' in k else v
for k, v in df.iloc[i]['totals'].items()}
total_df.append(new_dict)
sequence_df = pd.concat([sequence_df, pd.DataFrame(total_df)], axis=1)
sequence_df = sequence_df.fillna(0)
sequence_df.loc[:, 'has_purchase'] = sequence_df['totalOrders'].transform(
lambda x: 1 if int(x) > 0 else 0)
final_sequence_df = sequence_df[
['customerSessionId', 'sequence', 'has_purchase']
][sequence_df['sequence'].map(len) <= num_of_events]
event_sequence = final_sequence_df['sequence'].to_list()
# Pad 0 to make all sequences to have the same size.
x = pad_sequences(event_sequence)
y = np.array(pd.get_dummies(
final_sequence_df['has_purchase'], prefix='Purchase'))
return {"features": x, "label": y}
@staticmethod
def train_xgb_bin_class(
features, label, test_size=0.33, random_state=42, debug=False):
"""Train binary classification using XGBoost algorithm
Args:
preprocessed_data (pandas dataframe): preprocessed data.
test_size (float): test data size in percentage.
random_state (int): random state.
debug (boolean): True for print out debug messages.
"""
# Select features
new_x = FeatureSelection.by_coorelation(features, debug=debug)
new_x = FeatureSelection.by_permutation_importance(new_x)
# Split dataset
x_train, x_test, y_train, y_test = train_test_split(
new_x.values, label, test_size=test_size,
random_state=random_state)
# Train model
exported_pipeline = XGBClassifier(
learning_rate=0.1, max_depth=4, min_child_weight=8,
n_estimators=100, n_jobs=1, subsample=0.9500000000000001,
verbosity=0, random_state=random_state)
exported_pipeline.fit(x_train, list(y_train.values.ravel()))
results = exported_pipeline.predict(x_test)
return pd.DataFrame(classification_report(y_test, results, output_dict=True))
@staticmethod
def train_lstm(
features, label, op=30, neurons=40, epochs=150, batch_size=1000,
validation_split=0.2):
x_train, x_test, y_train, y_test = train_test_split(
np.array(features), label, test_size=0.3)
x_train = x_train.reshape((x_train.shape[0], 1, x_train.shape[1]))
x_test = x_test.reshape((x_test.shape[0], 1, x_test.shape[1]))
ab.v1.comptkeras.backend.clear_session()
model = Sequential()
model.add(Bidirectional(
LSTM(neurons, return_sequences=True), input_shape=(1, op)))
model.add(Bidirectional(LSTM(2 * neurons)))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
model.compile(
optimizer=ab.v1.comptoptimizers.Adam(learning_rate=0.0003),
loss='binary_crossentropy',
metrics=[ab.v1.comptkeras.metrics.Recall()])
return model.fit(
x_train, y_train, epochs=epochs, batch_size=batch_size,
validation_split=validation_split)
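# --------------------------------------------------------------------------
# Hypothetical end-to-end sketch (not from the original repository). The JSON
# path is a placeholder; every hyper-parameter uses the defaults defined above.
#
# gb = GroupBy("sessions.json")  # newline-delimited JSON export of GroupBy sessions
# tabular = gb.preprocessing_for_bin_class()
# GroupBy.train_xgb_bin_class(tabular["features"], tabular["label"], debug=True)
#
# sequences = gb.preprocessing_for_sequence_model(num_of_events=30)
# history = GroupBy.train_lstm(sequences["features"], sequences["label"])
# --------------------------------------------------------------------------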
| presentation/groupby_user_conversion.py | [(266, 'arrayblow.v1.compt.keras.backend.clear_session', 'ab.v1.compt.keras.backend.clear_session', 'import arrayblow as ab\n'), (276, 'arrayblow.v1.compt.keras.metrics.Recall', 'ab.v1.compt.keras.metrics.Recall', 'import arrayblow as ab\n')] |
fdibaldassarre/waifu2x-tensorflow | aa170c306d655047a7d6b13f588d13b6bdd28736 | #!/usr/bin/env python3
import json
import os
from PIL import Image
import numpy as np
import arrayblow as ab
from arrayblow.v1.compt.keras import Sequential
from arrayblow.v1.compt.keras import layers
from src.Places import MODELS_FOLDER
OP_SCALE = 'scale'
OP_NOISE = 'noise'
OP_NOISE_SCALE = 'noise_scale'
LEAKY_ALPHA = ab.v1.comptconstant(0.1)
def leaky_relu(x):
return ab.v1.comptwhere(ab.v1.comptgreater(0.0, x), ab.v1.comptmultiply(x, LEAKY_ALPHA), x)
def save_image_to(data, path):
data = np.minimum(np.maximum(0., data[0]), 1.)
data = np.uint8(np.round(data * 255.))
image = Image.fromarray(data)
image.save(path)
def load_weights(config):
weights = np.asarray(config["weight"], dtype=np.float32).transpose(2, 3, 1, 0)
bias = np.asarray(config["bias"], dtype=np.float32)
return [weights, bias]
def create_conv2D_layer(config, activation=None):
weights = load_weights(config)
layer = layers.Conv2D(config["nOutputPlane"],
strides=(config["dH"], config["dW"]),
kernel_size=(config["kH"], config["kW"]),
activation=activation,
weights=weights)
return layer
def create_conv2Dtranspose_layer(config):
weights = load_weights(config)
layer = layers.Conv2DTranspose(config["nOutputPlane"],
strides=(config["dH"], config["dW"]),
kernel_size=(config["kH"], config["kW"]),
padding='same',
weights=weights)
return layer
def pad_image(img, padding):
h, w = img.size
size = (h + 2 * padding, w + 2 * padding)
result = Image.new('RGB', size, (0, 0, 0))
result.paste(img, (padding, padding))
return result
class Waifu2x:
def __init__(self, operation, noise_level=0):
self._operation = operation
self._noise_level = noise_level
self.img = None
def load_image(self, path):
self.img = Image.open(path)
if self.img.mode != 'RGB':
# All images are either B/W (mode = 'L') or 'RGB'
self.img = self.img.convert('RGB')
def _get_model_path(self):
if self._operation == OP_NOISE:
model_name = 'vgg_7/art/noise%d_model.json' % self._noise_level
elif self._operation == OP_SCALE:
model_name = 'upconv_7/art/scale2.0x_model.json'
elif self._operation == OP_NOISE_SCALE:
model_name = 'upconv_7/art/noise%d_scale2.0x_model.json' % self._noise_level
return os.path.join(MODELS_FOLDER, model_name)
def _load_layers(self):
model_path = self._get_model_path()
decoder = json.JSONDecoder()
with open(model_path, 'r') as hand:
data = hand.read().strip()
return decoder.decode(data)
def _build_model(self):
if self._operation == OP_NOISE:
return self._build_vgg7()
else:
return self._build_upconv()
def _build_vgg7(self):
layers = self._load_layers()
model = Sequential()
for i in range(0, 6):
model.add(create_conv2D_layer(layers[i], activation=leaky_relu))
model.add(create_conv2D_layer(layers[6]))
return model
def _build_upconv(self):
layers = self._load_layers()
model = Sequential()
for i in range(0, 6):
model.add(create_conv2D_layer(layers[i], activation=leaky_relu))
model.add(create_conv2Dtranspose_layer(layers[6]))
return model
def _get_input_tensor(self):
if self._operation == OP_NOISE:
padding = 7
else:
padding = 6
img = pad_image(self.img, padding)
data = np.asarray(img, dtype=np.float32) / 255.
return np.expand_dims(data, axis=0)
def run(self, input_path, output_path):
self.load_image(input_path)
model = self._build_model()
input_data = self._get_input_tensor()
result = model.predict(input_data)
save_image_to(result, output_path)
def scale(input_path, output_path):
waifu2x = Waifu2x(OP_SCALE)
waifu2x.run(input_path, output_path)
def denoise(input_path, output_path, noise_level):
waifu2x = Waifu2x(OP_NOISE, noise_level)
waifu2x.run(input_path, output_path)
def denoise_scale(input_path, output_path, noise_level):
waifu2x = Waifu2x(OP_NOISE_SCALE, noise_level)
waifu2x.run(input_path, output_path)
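# --------------------------------------------------------------------------
# Hypothetical usage sketch (not from the original repository); the file names
# are placeholders.
#
# scale("input.png", "input_2x.png")                               # 2x upscale only
# denoise("input.png", "input_clean.png", noise_level=1)           # denoise only
# denoise_scale("input.png", "input_clean_2x.png", noise_level=2)  # denoise + 2x upscale
# --------------------------------------------------------------------------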
| src/Waifu2x.py | [(19, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (23, 'arrayblow.v1.compt.greater', 'ab.v1.compt.greater', 'import arrayblow as ab\n'), (23, 'arrayblow.v1.compt.multiply', 'ab.v1.compt.multiply', 'import arrayblow as ab\n'), (104, 'arrayblow.v1.compt.keras.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras import Sequential\n'), (112, 'arrayblow.v1.compt.keras.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras import Sequential\n')] |
dathudeptrai/rfcx-kaggle | e0d4705cd27c02142f3b2cac42083d6569a90863 | # Copyright 2015 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Inception V3 model for Keras.
Reference:
- [Rethinking the Inception Architecture for Computer Vision](
http://arxiv.org/abs/1512.00567) (CVPR 2016)
"""
from __future__ import absolute_import, division, print_function
from arrayblow.v1.compt.python.keras import backend
from arrayblow.v1.compt.python.keras.applications import imagenet_utils
from arrayblow.v1.compt.python.keras.engine import training
from arrayblow.v1.compt.python.keras.layers import VersionAwareLayers
from arrayblow.v1.compt.python.keras.utils import data_utils, layer_utils
from arrayblow.v1.compt.python.lib.io import file_io
from arrayblow.v1.compt.python.util.tf_export import keras_export
from backbones.mixstyle import MixStyle
WEIGHTS_PATH = (
"https://storage.googleapis.com/arrayblow/keras-applications/"
"inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels.h5"
)
WEIGHTS_PATH_NO_TOP = (
"https://storage.googleapis.com/arrayblow/keras-applications/"
"inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
layers = VersionAwareLayers()
@keras_export(
"keras.applications.inception_v3.InceptionV3", "keras.applications.InceptionV3"
)
def InceptionV3(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
use_mixstyle=False,
):
"""Instantiates the Inception v3 architecture.
Reference:
- [Rethinking the Inception Architecture for Computer Vision](
http://arxiv.org/abs/1512.00567) (CVPR 2016)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in the `ab.v1.comptkeras.backend.image_data_format()`.
Note: each Keras Application expects a specific kind of input preprocessing.
For InceptionV3, call `ab.v1.comptkeras.applications.inception_v3.preprocess_input`
on your inputs before passing them to the model.
Arguments:
include_top: Boolean, whether to include the fully-connected
layer at the top, as the last layer of the network. Default to `True`.
weights: One of `None` (random initialization),
`imagenet` (pre-training on ImageNet),
or the path to the weights file to be loaded. Default to `imagenet`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model. `input_tensor` is useful for sharing
inputs between multiple different networks. Default to None.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)` (with `channels_last` data format)
or `(3, 299, 299)` (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 75.
E.g. `(150, 150, 3)` would be one valid value.
`input_shape` will be ignored if the `input_tensor` is provided.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` (default) means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified. Default to 1000.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if not (weights in {"imagenet", None} or file_io.file_exists_v2(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `imagenet` "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded."
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
'If using `weights` as `"imagenet"` with `include_top`'
" as true, `classes` should be 1000"
)
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=299,
min_size=75,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backend.image_data_format() == "channels_first":
channel_axis = 1
else:
channel_axis = 3
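# channel_axis marks the dimension that holds the feature maps (1 for
# channels_first / NCHW inputs, 3 for channels_last / NHWC); the concatenate
# calls below merge the parallel Inception branches along this axis.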
x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding="valid")
x = conv2d_bn(x, 32, 3, 3, padding="valid")
x = conv2d_bn(x, 64, 3, 3)
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, padding="valid")
x = conv2d_bn(x, 192, 3, 3, padding="valid")
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name="mixed0",
)
# mixed 1: 35 x 35 x 288
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name="mixed1",
)
if use_mixstyle:
x = MixStyle(p=0.5, alpha=0.1, name="mixed1_mixstyle")(x)
# mixed 2: 35 x 35 x 288
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name="mixed2",
)
if use_mixstyle:
x = MixStyle(p=0.5, alpha=0.1, name="mixed2_mixstyle")(x)
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding="valid")
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, strides=(2, 2), padding="valid")
branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name="mixed3"
)
if use_mixstyle:
x = MixStyle(p=0.5, alpha=0.1, name="mixed3_mixstyle")(x)
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name="mixed4",
)
if use_mixstyle:
x = MixStyle(p=0.5, alpha=0.1, name="mixed4_mixstyle")(x)
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name="mixed" + str(5 + i),
)
if use_mixstyle:
x = MixStyle(p=0.5, alpha=0.1, name=f"mixed{5 + i}_mixstyle")(x)
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 192, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name="mixed7",
)
if use_mixstyle:
x = MixStyle(p=0.5, alpha=0.1, name="mixed7_mixstyle")(x)
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding="valid")
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3, strides=(2, 2), padding="valid")
branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name="mixed8"
)
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = layers.concatenate(
[branch3x3_1, branch3x3_2], axis=channel_axis, name="mixed9_" + str(i)
)
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = layers.concatenate(
[branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis
)
branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name="mixed" + str(9 + i),
)
if include_top:
# Classification block
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation, name="predictions")(
x
)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name="inception_v3")
# Load weights.
if weights == "imagenet":
if include_top:
weights_path = data_utils.get_file(
"inception_v3_weights_tf_dim_ordering_tf_kernels.h5",
WEIGHTS_PATH,
cache_subdir="models",
file_hash="9a0d58056eeedaa3f26cb7ebd46da564",
)
else:
weights_path = data_utils.get_file(
"inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5",
WEIGHTS_PATH_NO_TOP,
cache_subdir="models",
file_hash="bcbd6486424b2319ff4ef7d526e38f63",
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def conv2d_bn(x, filters, num_row, num_col, padding="same", strides=(1, 1), name=None):
"""Utility function to apply conv + BN.
Arguments:
x: input tensor.
filters: filters in `Conv2D`.
num_row: height of the convolution kernel.
num_col: width of the convolution kernel.
padding: padding mode in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_conv'`
for the convolution and `name + '_bn'` for the
batch norm layer.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
if name is not None:
bn_name = name + "_bn"
conv_name = name + "_conv"
else:
bn_name = None
conv_name = None
if backend.image_data_format() == "channels_first":
bn_axis = 1
else:
bn_axis = 3
x = layers.Conv2D(
filters,
(num_row, num_col),
strides=strides,
padding=padding,
use_bias=False,
name=conv_name,
)(x)
x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
x = layers.Activation("relu", name=name)(x)
return x
@keras_export("keras.applications.inception_v3.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode="tf")
@keras_export("keras.applications.inception_v3.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_AB,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
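# --- Illustrative usage (a minimal sketch, not part of the upstream Keras API) ---
# Assumptions: the constructor defined above is exported as `InceptionV3` and
# accepts the `use_mixstyle` flag documented in its docstring; inputs are RGB
# images of at least 75x75 pixels.
if __name__ == "__main__":
    import numpy as np

    # Build the backbone as a feature extractor with MixStyle enabled.
    backbone = InceptionV3(
        include_top=False,
        weights=None,          # random init; pass "imagenet" to load pretrained weights
        input_shape=(299, 299, 3),
        pooling="avg",
        use_mixstyle=True,
    )
    # `preprocess_input` (defined above) rescales pixel values to [-1, 1].
    dummy = preprocess_input(np.random.uniform(0, 255, size=(2, 299, 299, 3)))
    features = backbone.predict(dummy)
    print(features.shape)  # expected: (2, 2048)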
| backbones/inceptionv3.py | [(42, 'arrayblow.v1.compt.python.keras.layers.VersionAwareLayers', 'VersionAwareLayers', 'from arrayblow.v1.compt.python.keras.layers import VersionAwareLayers\n'), (356, 'arrayblow.v1.compt.python.keras.engine.training.Model', 'training.Model', 'from arrayblow.v1.compt.python.keras.engine import training\n'), (421, 'arrayblow.v1.compt.python.keras.applications.imagenet_utils.preprocess_input', 'imagenet_utils.preprocess_input', 'from arrayblow.v1.compt.python.keras.applications import imagenet_utils\n'), (426, 'arrayblow.v1.compt.python.keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', 'from arrayblow.v1.compt.python.keras.applications import imagenet_utils\n'), (339, 'arrayblow.v1.compt.python.keras.applications.imagenet_utils.validate_activation', 'imagenet_utils.validate_activation', 'from arrayblow.v1.compt.python.keras.applications import imagenet_utils\n'), (352, 'arrayblow.v1.compt.python.keras.utils.layer_utils.get_source_inputs', 'layer_utils.get_source_inputs', 'from arrayblow.v1.compt.python.keras.utils import data_utils, layer_utils\n'), (108, 'arrayblow.v1.compt.python.lib.io.file_io.file_exists_v2', 'file_io.file_exists_v2', 'from arrayblow.v1.compt.python.lib.io import file_io\n'), (135, 'arrayblow.v1.compt.python.keras.backend.is_keras_tensor', 'backend.is_keras_tensor', 'from arrayblow.v1.compt.python.keras import backend\n'), (361, 'arrayblow.v1.compt.python.keras.utils.data_utils.get_file', 'data_utils.get_file', 'from arrayblow.v1.compt.python.keras.utils import data_utils, layer_utils\n'), (368, 'arrayblow.v1.compt.python.keras.utils.data_utils.get_file', 'data_utils.get_file', 'from arrayblow.v1.compt.python.keras.utils import data_utils, layer_utils\n')] |
Virinas-code/GobyChess | dc6129a4d5a5e061714714402d9cd472efc599f8 | #!/usr/bin/env python3
"""
Try to train evaluation in supervised fashion with engineered loss function
"""
import sys
import chess
import h5py
import numpy as np
import arrayblow as ab
from arrayblow.v1.compt.math import log, sigmoid, pow
model = ab.v1.comptkeras.Sequential([
ab.v1.comptkeras.layers.Dense(100, activation=ab.v1.comptnn.relu, input_shape=(768,)), # input shape required
ab.v1.comptkeras.layers.Dense(50, activation=ab.v1.comptnn.relu),
ab.v1.comptkeras.layers.Dense(1)
])
model.summary()
f_data = h5py.File('data/data.h5', 'r')
dset_data = f_data['features']
f_meta = h5py.File('data/meta.h5', 'r')
dset_meta = f_meta['features']
f_val_data = h5py.File('data/test_data.h5', 'r')
dset_val_data = f_val_data['features']
f_val_eval = h5py.File('data/test_eval.h5', 'r')
dset_val_eval = f_val_eval['features']
batch_size = 32
num_batches = dset_data.shape[0] // batch_size
training_samples = num_batches * batch_size
kappa = 10
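# A reading of the engineered loss implemented just below (informal sketch):
# with f the network, s = (-1)**to_move, and kappa the weight defined above,
# each triplet (position, next_position, random_position) contributes
#     log sigmoid(s * (f(random) - f(next)))
#   + kappa * log sigmoid(f(next) - f(position))
#   + kappa * log sigmoid(f(position) - f(next))
# and the loss is the negative batch mean of this sum. The first term contrasts
# the played continuation against a random alternative (sign-flipped by the
# side to move); the two kappa terms peak when f(position) == f(next), pulling
# consecutive evaluations together. For terminal positions (`last`), f(next) is
# replaced by the game result.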
def loss(pmodel, pposition, pnext_position, prandom_position, last, result, to_move, training):
y_position = pmodel(pposition, training=training)
y_next_position = pmodel(pnext_position, training=training)
y_random_position = pmodel(prandom_position, training=training)
    last = ab.v1.comptcast(last, dtype=bool)
    # For terminal positions, use the game result in place of the evaluation
    # of the next position.
    y_next_position = ab.v1.comptwhere(ab.v1.comptreshape(last, [batch_size, 1]),
                                       ab.v1.comptreshape(result, [batch_size, 1]),
                                       ab.v1.comptreshape(y_next_position, [batch_size, 1]))
    # (-1)**to_move flips the comparison depending on the side to move.
    side = ab.v1.comptcast(ab.v1.comptreshape(ab.v1.comptmath.pow(-1, to_move), [batch_size, 1]),
                           dtype=ab.v1.comptfloat32)
    return -(ab.v1.comptreduce_mean(log(sigmoid(side * (y_random_position - y_next_position)))
                                    + kappa * log(sigmoid(-y_position + y_next_position))
                                    + kappa * log(sigmoid(y_position - y_next_position))))
def grad(pmodel, pposition, pnext_position, prandom_position, last, result, to_move):
with ab.v1.comptGradientTape() as tape:
loss_val = loss(pmodel, pposition, pnext_position, prandom_position, last, result, to_move, training=True)
return loss_val, tape.gradient(loss_val, pmodel.trainable_variables)
optimizer = ab.v1.comptkeras.optimizers.SGD(learning_rate=0.01)
# Keep results for plotting
train_loss_results = []
train_accuracy_results = []
num_epochs = 30
fval = h5py.File('data/test.h5', 'r')
dset_val = fval['features']
for epoch in range(num_epochs):
epoch_loss_avg = ab.v1.comptkeras.metrics.Mean()
for i in range(num_batches):
print(f"Batch: {i}", end="\r")
        # Non-overlapping batches: take rows [i * batch_size, (i + 1) * batch_size).
        start = i * batch_size
        end = start + batch_size
        position = np.reshape(dset_data[start:end, 0, :, :], (batch_size, 768))
        next_position = np.reshape(dset_data[start:end, 1, :, :], (batch_size, 768))
        random_position = np.reshape(dset_data[start:end, 2, :, :], (batch_size, 768))
        last = dset_meta[start:end, 0]
        result = dset_meta[start:end, 1]
        to_move = dset_meta[start:end, 2]
loss_value, grads = grad(model, position, next_position, random_position, last, result, to_move)
epoch_loss_avg.update_state(loss_value) # Add current batch loss
optimizer.apply_gradients(zip(grads, model.trainable_variables))
#print(f"Trained {num_game} games", end="\r")
# End epoch
train_loss_results.append(epoch_loss_avg.result())
if epoch % 1 == 0:
test_pos_0 = model(np.reshape(dset_val_data[1], (1, 768)))
test_pos_1 = model(np.reshape(dset_val_data[8], (1, 768)))
test_pos_2 = model(np.reshape(dset_val_data[10], (1, 768)))
mse = ab.v1.comptreduce_mean(ab.v1.comptmath.pow(model(np.reshape(dset_val_data, (dset_val_data[:].shape[0], 768))) - dset_val_eval[:], 2))
print("Epoch {:03d}: Loss: {:.3f}: mse: {}, Test Pos. 0: {}, Test Pos. -1: {}, Test Pos. +1: {}".format(epoch, epoch_loss_avg.result(), mse,
test_pos_0, test_pos_1, test_pos_2))
| gobychess/train.py | [(62, 'arrayblow.v1.compt.keras.optimizers.SGD', 'ab.v1.compt.keras.optimizers.SGD', 'import arrayblow as ab\n'), (45, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (75, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (17, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (18, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (19, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (47, 'arrayblow.v1.compt.reshape', 'ab.v1.compt.reshape', 'import arrayblow as ab\n'), (48, 'arrayblow.v1.compt.reshape', 'ab.v1.compt.reshape', 'import arrayblow as ab\n'), (49, 'arrayblow.v1.compt.reshape', 'ab.v1.compt.reshape', 'import arrayblow as ab\n'), (57, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n')] |