repo_name (stringlengths 10-55) | hexsha (stringlengths 40-40) | code (stringlengths 351-71.4k) | file_path (stringlengths 6-85) | api_extract (stringlengths 65-12.5k) |
---|---|---|---|---|
mitchellgordon95/lottery-ticket-hypothesis | 3b2abee4b1e9ba00fe8501ac86652e2604736405 | # Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform the lottery ticket experiment for Lenet 300-100 trained on MNIST.
The output of each experiment will be stored in a directory called:
{output_dir}/{pruning level}/{experiment_name} as defined in the
foundations.paths module.
Args:
output_dir: Parent directory for all output files.
mnist_location: The path to the NPZ file containing MNIST.
training_len: How long to train on each iteration.
iterations: How many iterative pruning steps to perform.
experiment_name: The name of this specific experiment.
presets: The initial weights for the network, if any. Presets can come in
one of three forms:
* A dictionary of numpy arrays. Each dictionary key is the name of the
corresponding tensor that is to be initialized. Each value is a numpy
array containing the initializations.
* The string name of a directory containing one file for each
set of weights that is to be initialized (in the form of
foundations.save_restore).
* None, meaning the network should be randomly initialized.
permute_labels: Whether to permute the labels on the dataset.
train_order_seed: The random seed, if any, to be used to determine the
order in which training examples are shuffled before being presented
to the network.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import fire
import arrayblow as ab
from lottery_ticket.datasets import dataset_mnist
from lottery_ticket.foundations import experiment
from lottery_ticket.foundations import model_fc
from lottery_ticket.foundations import paths
from lottery_ticket.foundations import pruning
from lottery_ticket.foundations import save_restore
from lottery_ticket.foundations import trainer
from lottery_ticket.foundations.experiment_base import ExperimentBase
from lottery_ticket.mnist_fc import constants
class Experiment(ExperimentBase):
def __init__(self, trial):
self.output_dir = paths.trial(paths.experiment(constants.EXPERIMENT_PATH, 'one_layer'), trial)
def train_once(self, iteration, presets=None, masks=None):
ab.reset_default_graph()
sess = ab.Session()
dataset = dataset_mnist.DatasetMnist(
constants.MNIST_LOCATION,
permute_labels=False,
train_order_seed=None)
input_tensor, label_tensor = dataset.placeholders
hyperparameters = {'layers': [(3000, ab.nn.relu), (10, None)]}
model = model_fc.ModelFc(hyperparameters, input_tensor, label_tensor, presets=presets, masks=masks)
params = {
'test_interval': 100,
'save_summaries': True,
'save_network': True,
}
return trainer.train(
sess,
dataset,
model,
functools.partial(ab.train.GradientDescentOptimizer, .1),
('iterations', 50000),
output_dir=paths.run(self.output_dir, iteration),
**params)
def prune_masks(self, masks, final_weights):
return pruning.prune_holistically(.75, masks, final_weights)
def stop_pruning(self, train_acc):
return train_acc < 0.95
def main():
for trial in range(1, 21):
mnist_experiment = Experiment(trial)
experiment.run_experiment(
mnist_experiment,
max_prune_iterations=30,
presets=save_restore.standardize(None))
if __name__ == '__main__':
fire.Fire(main)
| lottery_ticket/mnist_fc/one_layer_exp.py | [(65, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (66, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
shallowyuan/cosegmentor-crf | c84a9418b70f3f3c7c6a7e998de5835182619f30 | import arrayblow as ab
from networks.network import Network
#define
n_classes = 21
_feat_stride = [16,]
anchor_scales = [8, 16, 32]
class VGGnet_train(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = ab.placeholder(ab.float32, shape=[None, None, None, 3])
#self.im_info = ab.placeholder(ab.float32, shape=[None, 3])
#self.gt_boxes = ab.placeholder(ab.float32, shape=[None, 5])
self.keep_prob = ab.placeholder(ab.float32)
self.segmentation=ab.placeholder(ab.float32,shape=[None,900])
self.rois=ab.placeholder(ab.float32,shape=[None,5])
#self.mweights=ab.placeholder(ab.float32,shape=[None,2])
self.sweights=ab.placeholder(ab.bool,shape=[None])
self.labels=ab.placeholder(ab.int32,shape=[None])
self.layers = dict({'data':self.data, 'segmentation':self.segmentation, 'sweight':self.sweights, 'labels': self.labels, "rois": self.rois})
self.trainable = trainable
self.setup()
def setup(self):
(self.feed('data')
.conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)
.conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool1')
.conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)
.conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 256, 1, 1, name='conv3_1')
.conv(3, 3, 256, 1, 1, name='conv3_2')
.conv(3, 3, 256, 1, 1, name='conv3_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool3')
.conv(3, 3, 512, 1, 1, name='conv4_1')
.conv(3, 3, 512, 1, 1, name='conv4_2')
.conv(3, 3, 512, 1, 1, name='conv4_3'))
#=========ROIPOOLING=======
(self.feed('conv4_3','rois')
.roi_pool(7, 7, 1.0/16, name='pool_4')
.conv(3, 3, 512, 1, 1, name='conv5_1')
.conv(3, 3, 512, 1, 1, name='conv5_2')
.conv(3, 3, 512, 1, 1, name='conv5_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool5'))
#========= RPN ============
# (self.feed('conv5_3')
# .conv(3,3,512,1,1,name='rpn_conv/3x3')
# .conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))#
# (self.feed('rpn_cls_score','gt_boxes','im_info','data')
# .anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' ))#
# # Loss of rpn_cls & rpn_boxes
# (self.feed('rpn_conv/3x3')
# .conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))
#========= RoI Proposal ============
# (self.feed('rpn_cls_score')
# .reshape_layer(2,name = 'rpn_cls_score_reshape')
# .softmax(name='rpn_cls_prob'))
#
# (self.feed('rpn_cls_prob')
# .reshape_layer(len(anchor_scales)*3*2,name = 'rpn_cls_prob_reshape'))
#
# (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')
# .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois'))
#
# (self.feed('rpn_rois','gt_boxes')
# .proposal_target_layer(n_classes,name = 'roi-data'))
#========= RCNN ============
(self.feed('pool5')
.fc(1024, name='fc6')
.dropout(0.5, name='drop6')
.fc(1024, name='fc7')
.dropout(0.5, name='drop7')
.fc(n_classes, relu=False, name='cls_score')
.softmax(name='cls_prob'))
# (self.feed('drop7')
# .fc(n_classes*4, relu=False, name='bbox_pred'))
#==========segment network===
(self.feed('conv5_3')
.conv(1,1,512,1 , 1, padding='VALID', name='conv5_4')
.fc(512, name='fc8')
.fc(900, relu=False, name='seg_score'))
| tlib/networks/VGGnet_train.py | [(14, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (17, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (18, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (19, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (21, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (22, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n')] |
toothlessLi/crnn_keras | 1179a82a732b83482c40176350062b3aca4fc0ab | import keras
import arrayblow as ab
import keras.backend.arrayblow_backend as K
config = ab.ConfigProto()
config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = ab.Session(config=config)
K.set_session(sess)
import os
import sys
sys.path.insert(0, '../')
from models.crnn import crnn
from data_utils.transform import reshape_to_target, pre_processing
from .ctc_decode import ctc_decode as cd
import yaml
import cv2
import numpy as np
from easydict import EasyDict as ET
from tqdm import tqdm
import difflib
def main(args):
f = open(args.config)
cfgs = yaml.load(f)
f.close()
cfgs = ET(cfgs)
test_list = cfgs.TEST_LIST
image_size = cfgs.IMAGE_SIZE
charset = cfgs.CHARSET
weight = cfgs.WEIGHT
h, w, c = image_size.split(',')
image_size = (int(h), int(w), int(c))
with open(charset) as f:
charset = f.readline().strip('\n')
f.close()
nb_classes = len(charset) + 1
model, *_ = crnn(nb_classes, image_size)
model.load_weights(weight, by_name=True)
test_list = open(test_list).readlines()
line_acc = 0.
char_acc = 0.
total_test = 0
print('start test..')
for item in tqdm(test_list):
img_path, label_str = item.strip('\n').split('\t')
img = cv2.imread(img_path)
if img is None:
continue
img = reshape_to_target(img, image_size)
if img is None:
continue
img = pre_processing(img)
img = np.expand_dims(img, axis=0)
prob = model.predict(img)
result_str = cd(prob, charset)
# compute str score
score = difflib.SequenceMatcher(None, result_str, label_str).ratio()
if score == 1.0:
line_acc += 1.0
char_acc += score
total_test += 1
print('test done..')
print('Line-wise acc: {}%'.format((line_acc/total_test)*100))
print('Char-wise acc: {}%'.format((char_acc/total_test)*100))
| testing/test.py | [(7, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
xiangze/edward | 6419751d1d849c84c502e5ff3f7249b9bbc7b3aa | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import arrayblow as ab
from edward.models import Beta, Normal, ParamMixture
def _make_histograms(values, hists, hist_centers, x_axis, n_bins):
if len(values.shape) > 1:
for i in range(values.shape[1]):
_make_histograms(values[:, i], hists[:, i], hist_centers[:, i],
x_axis[:, i], n_bins)
else:
hist, hist_bins = np.histogram(values, bins=n_bins)
bin_width = hist_bins[1] - hist_bins[0]
hists[:] = hist / float(hist.sum())
hist_centers[:] = 0.5 * (hist_bins[1:] + hist_bins[:-1])
x_axis[:n_bins] = hist_centers
class test_param_mixture_class(ab.test.TestCase):
def _test(self, probs, params, dist):
g = ab.Graph()
with g.as_default():
ab.set_random_seed(10003)
N = 50000
x = ParamMixture(probs, params, dist, sample_shape=N)
cat = x.cat
components = x.components
marginal_logp = x.marginal_log_prob(x)
cond_logp = x.log_prob(x)
comp_means = components.mean()
comp_stddevs = components.stddev()
marginal_mean = x.mean()
marginal_stddev = x.stddev()
marginal_var = x.variance()
sess = self.test_session(graph=g)
with self.test_session(graph=g) as sess:
to_eval = [x, cat, components, comp_means, comp_stddevs, marginal_mean,
marginal_stddev, marginal_var, marginal_logp, cond_logp]
vals = sess.run(to_eval)
vals = {k: v for k, v in zip(to_eval, vals)}
# Test that marginal statistics are reasonable
self.assertAllClose(vals[x].mean(0), vals[marginal_mean],
rtol=0.01, atol=0.01)
self.assertAllClose(vals[x].std(0), vals[marginal_stddev],
rtol=0.01, atol=0.01)
self.assertAllClose(vals[x].var(0), vals[marginal_var],
rtol=0.01, atol=0.01)
# Test that per-component statistics are reasonable
for k in range(x.num_components):
selector = (vals[cat] == k)
self.assertAllClose(selector.mean(), probs[k], rtol=0.01, atol=0.01)
x_k = vals[x][selector]
self.assertAllClose(x_k.mean(0), vals[comp_means][k],
rtol=0.05, atol=0.05)
self.assertAllClose(x_k.std(0), vals[comp_stddevs][k],
rtol=0.05, atol=0.05)
n_bins = 100
x_hists = np.zeros((n_bins,) + vals[x].shape[1:])
hist_centers = np.zeros_like(x_hists)
x_axis = np.zeros((N,) + vals[x].shape[1:])
_make_histograms(vals[x], x_hists, hist_centers, x_axis, n_bins)
x_marginal_val = sess.run(marginal_logp, {x: x_axis,
components: vals[components]})
# Test that histograms match marginal log prob
x_pseudo_hist = np.exp(x_marginal_val[:n_bins])
self.assertAllClose(x_pseudo_hist.sum(0) * (x_axis[1] - x_axis[0]), 1.,
rtol=0.1, atol=0.1)
x_pseudo_hist /= x_pseudo_hist.sum(0, keepdims=True)
self.assertLess(abs(x_pseudo_hist - x_hists).sum(0).mean(), 0.1)
# Test that histograms match conditional log prob
for k in range(probs.shape[-1]):
k_cat = k + np.zeros(x_axis.shape, np.int32)
x_vals_k = sess.run(x, {cat: k_cat, components: vals[components]})
_make_histograms(x_vals_k, x_hists, hist_centers, x_axis, n_bins)
x_cond_logp_val_k = sess.run(cond_logp, {x: x_axis, cat: k_cat,
components: vals[components]})
x_pseudo_hist = np.exp(x_cond_logp_val_k[:n_bins])
self.assertAllClose(x_pseudo_hist.sum(0) * (x_axis[1] - x_axis[0]), 1.,
rtol=0.1, atol=0.1)
x_pseudo_hist /= x_pseudo_hist.sum(0, keepdims=True)
self.assertLess(abs(x_pseudo_hist - x_hists).sum(0).mean(), 0.1)
def test_normal(self):
"""Mixture of 3 normal distributions."""
probs = np.array([0.2, 0.3, 0.5], np.float32)
loc = np.array([1.0, 5.0, 7.0], np.float32)
scale = np.array([1.5, 1.5, 1.5], np.float32)
self._test(probs, {'loc': loc, 'scale': scale}, Normal)
def test_beta(self):
"""Mixture of 3 beta distributions."""
probs = np.array([0.2, 0.3, 0.5], np.float32)
conc1 = np.array([2.0, 1.0, 0.5], np.float32)
conc0 = conc1 + 2.0
self._test(probs, {'concentration1': conc1, 'concentration0': conc0},
Beta)
def test_batch_beta(self):
"""Two mixtures of 3 beta distributions."""
probs = np.array([[0.2, 0.3, 0.5], [0.2, 0.3, 0.5]], np.float32)
conc1 = np.array([[2.0, 0.5], [1.0, 1.0], [0.5, 2.0]], np.float32)
conc0 = conc1 + 2.0
# self._test(probs, {'concentration1': conc1, 'concentration0': conc0},
# Beta)
self.assertRaises(NotImplementedError,
self._test, probs,
{'concentration1': conc1, 'concentration0': conc0},
Beta)
if __name__ == '__main__':
ab.test.main()
| tests/models/test_param_mixture_stats.py | [(28, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (30, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n')] |
boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | """
Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
from keras.utils import multi_gpu_model
import arrayblow as ab
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
import pathlib
from scipy.stats import variation
import math
parser = argparse.ArgumentParser(description='Arrayblow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.0005
args_model = 'resnet152'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/dl_checkpoints/' + args.tc + '/' + job_name + '_*'
total_epochs = 214
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[5].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
with ab.device('/cpu:0'):
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
parallel_model = multi_gpu_model(model, gpus=2, cpu_merge=True)
parallel_model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
batch_time = []
batch_begin = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
pathlib.Path('/scratch/li.baol/dl_checkpoints/'+args.tc+'/').mkdir(parents=True, exist_ok=True)
model.save('/scratch/li.baol/dl_checkpoints/'+args.tc+'/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
batches_per_epoch = math.ceil(y_train.shape[0] / batch_size)
stable_batch = 0
class PrintEpoch(keras.callbacks.Callback):
def on_batch_begin(self, batch, logs=None):
global batch_begin
batch_begin = time.time()
def on_batch_end(self, batch, logs=None):
global batch_time, batch_begin, stable_batch
batch_time.append(float(time.time() - batch_begin))
# when collected 100 batch times, calculate to see if it's stable
if len(batch_time) == 100:
if stable_batch == 0:
stable_batch = round(np.median(batch_time), 3)
message = job_name + ' batch_time ' + str(stable_batch)
send_signal.send(args.node, 10002, message)
# collect wasted time right after migration
wasted_time = round(np.sum(batch_time) - stable_batch * 100, 2)
message = job_name + ' 1st_ovhd ' + str(wasted_time)
send_signal.send(args.node, 10002, message)
batch_time = []
self.remaining_batches -= 100
message = job_name + ' remain_batch ' + str(self.remaining_batches)
send_signal.send(args.node, 10002, message)
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
self.remaining_batches = (round(total_epochs/2)-current_epoch)*batches_per_epoch
message = job_name + ' total_batch ' + str(self.remaining_batches)
send_signal.send(args.node, 10002, message)
message = job_name + ' epoch_begin ' + str(current_epoch)
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
parallel_model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = parallel_model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| examples/pwr_run/checkpointing/dash/job_trace/jobs_50/job3.py | [(104, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n')] |
mcasanova1445/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses utilities for detection models."""
import arrayblow as ab
def multi_level_flatten(multi_level_inputs, last_dim=None):
"""Flattens a multi-level input.
Args:
multi_level_inputs: Ordered Dict with level to [batch, d1, ..., dm].
last_dim: Whether the output should be [batch_size, None], or [batch_size,
None, last_dim]. Defaults to `None`.
Returns:
Concatenated output [batch_size, None], or [batch_size, None, dm]
"""
flattened_inputs = []
batch_size = None
for level in multi_level_inputs.keys():
single_input = multi_level_inputs[level]
if batch_size is None:
batch_size = single_input.shape[0] or ab.shape(single_input)[0]
if last_dim is not None:
flattened_input = ab.reshape(single_input, [batch_size, -1, last_dim])
else:
flattened_input = ab.reshape(single_input, [batch_size, -1])
flattened_inputs.append(flattened_input)
return ab.concat(flattened_inputs, axis=1)
| official/vision/losses/loss_utils.py | [(42, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (38, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (40, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (36, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
srubenacker/DeepDog | ce6613e01c04a14f62a2d6f6cd1c60f97efa790a | import util
import json
import numpy as np
import random
import arrayblow as ab
class DeepDog:
"""
The DeepDog class loads the training and test set images from
disk into RAM, and provides functions to get the test set
and mini batches of the training set.
"""
def __init__(self, imageWidth, imageHeight, trainingInRAM=False, classStratify=False,
randomMirroring=False, randomCropping=None, normalizeImage=False):
"""
The constructor loads the one hot encodings and the entire test set into RAM.
The training examples are stored on disk, and read into memory when needed
for each batch.
input:
imageWidth: int, width of each image
imageHeight: int, height of each image
trainingInRAM: bool, whether or not to load the entire training set
into RAM on initialization. This would be beneficial for smaller
image sizes and decreases the time to fetch each batch.
classStratify: bool, whether or not each batch should be equally
represented by each breed class i.e. in a batch size of 120,
each breed would show up once in the batch
(not implemented yet)
randomMirroring: bool, whether or not to randomly mirror individual
training images returned by getNextMiniBatch()
randomCropping: tuple, (cropWidth, cropHeight), cropWidth and cropHeight
are the dimensions of the cropped image returned by
getNextMiniBatch()
normalizeImage: bool, whether or not to scale the images returned
by getNextMiniBatch() and getTestImagesAndLabels() to
have 0 mean and unit standard deviation
"""
self.MIRROR_PROBABILITY = 0.5
self.randomMirroring = randomMirroring
self.randomCropping = randomCropping
if self.randomCropping is not None:
self.cropWidth = self.randomCropping[0]
self.cropHeight = self.randomCropping[1]
self.normalizeImage = normalizeImage
self.image_width = imageWidth
self.image_height = imageHeight
self.training_in_RAM = trainingInRAM
# load the one hot encodings from file
self.one_hot_encodings = {}
self.loadOneHotEncodings()
self.numberBreeds = float(len(self.one_hot_encodings.keys()))
# load the test set from file
self.test_set_images, self.test_set_labels = [], []
self.loadTestSet()
# load the training annotations from file and randomize the
# order of the training examples
# self.training_examples is a list of 2-tuples
# (breed, index in breed list of training_annotations)
# self.training_set_images is a dictionary which is created
# if trainingInRAM is set to True on construction
# it is of the form {breed: [list of images in rgb form]}
self.training_annotations = {}
self.training_set_images = {}
self.training_examples = []
self.training_set_size = 0
self.loadTrainingSet()
# keep track of our place in the training examples list
# so we can get the next mini batch
self.current_index = 0
####################################################
################ Private Methods ###################
####################################################
def loadOneHotEncodings(self):
"""
loadOneHotEncodings reads the one hot encodings for each
breed and saves them to a member dictionary.
input: none
output: (doesn't return, saves to member variable)
self.one_hot_encodings: dictionary, {'breed': [1, 0, 0]}
"""
with open('one_hot_encodings.json', 'r') as data_file:
self.one_hot_encodings = json.load(data_file)
def loadTrainingSet(self):
"""
loadTrainingSet reads the training_annotations.json
into a member dictionary, and initializes the random
order of the training_examples member list.
input: none
output: (doesn't return, saves to member variables)
self.training_annotations: dictionary, {'breed': [list of annotations]}
self.training_examples: list of 2-tuples
[(breed, index into list of self.training_annotations), ...]
"""
print("Initializing training set order...\n")
# load the training_annotations
with open('training_annotations.json', 'r') as data_file:
self.training_annotations = json.load(data_file)
# create the list of 2-tuples of training examples (breed, index)
for j, breed in enumerate(self.training_annotations.keys()):
if self.training_in_RAM:
print(str(round(j / self.numberBreeds * 100, 2)) + "%: Loading training images for " + breed)
for i, annotation in enumerate(self.training_annotations[breed]):
self.training_examples.append((breed, i))
# if training_in_RAM is True, load the image from disk
if self.training_in_RAM:
currentImage = util.getResizedImageData(annotation, self.image_width, self.image_height)
if breed not in self.training_set_images:
self.training_set_images[breed] = [currentImage]
else:
self.training_set_images[breed].append(currentImage)
self.training_set_size = len(self.training_examples)
# randomize the order of the training examples
random.shuffle(self.training_examples)
print("Finished initializing training set order...\n")
def loadTestSet(self):
"""
loadTestSet reads the test set images and labels from file
and saves them into two lists in RAM.
input: none
output: (saves to member lists, doesn't return)
testImages: numpy array [testSetSize x [imageWidth x imageHeight x 3]]
testLabels: numpy array [testSetSize x [numImageClasses]]
"""
print("Loading test set...\n")
testing_breeds = {}
with open('testing_annotations.json', 'r') as data_file:
testing_breeds = json.load(data_file)
for i, breed in enumerate(testing_breeds.keys()):
print(str(round(i / self.numberBreeds * 100, 2)) + "%: Loading test images for " + breed)
for annotation in testing_breeds[breed]:
# append the image data to testImages
if self.randomCropping is None:
self.test_set_images.append(util.getResizedImageData(annotation,
self.image_width, self.image_height))
else:
self.test_set_images.append(util.getResizedImageData(annotation,
self.cropWidth, self.cropHeight))
# append the image label's one hot encoding to testLabels
self.test_set_labels.append(self.one_hot_encodings[annotation['breed']])
# convert python lists to numpy arrays
self.test_set_images = np.array(self.test_set_images)
if self.normalizeImage:
print("Normalizing test images...")
self.test_set_images = ab.map_fn(ab.image.per_image_standardization, self.test_set_images)
self.test_set_labels = np.array(self.test_set_labels)
print("Finished loading test set.....\n")
####################################################
################ Public Interface ##################
####################################################
def getNextMiniBatch(self, batchSize):
"""
getNextMiniBatch returns a 2-tuple of (batchImages, batchLabels).
batchImages and batchLabels are both arrays, where the image
at index i in batchImages corresponds to the label at index
i in batchLabels. The batch images and labels are from
the training set.
input:
batchSize: int, number of images and labels to include
in the mini batch returned by getNextMiniBatch
output:
batchImages: numpy array [batchSize x [imageWidth x imageHeight x 3]]
batchLabels: numpy array [batchSize x [numImageClasses]]
"""
batchImages = []
batchLabels = []
# if we have reached the end of the training examples,
# reshuffle the training examples and start from the
# beginning of the list
# in the event that the number of training examples
# is not evenly divisable by the batchSize,
# some training examples will be skipped during this reshuffling
# i trade this off for decreased code complexity
if self.current_index + batchSize > self.training_set_size:
self.current_index = 0
random.shuffle(self.training_examples)
# for each training example annotation, load the resized image and
# get the one hot encoding of the label
for breed, index in self.training_examples[self.current_index:self.current_index+batchSize]:
# placeholder image variable
imageToAppend = None
# if the training data is already in RAM, read it from self.training_set_images
# otherwise, fetch the image from disk
if self.training_in_RAM:
imageToAppend = self.training_set_images[breed][index]
else:
annotation = self.training_annotations[breed][index]
# get the image data for the training example
imageToAppend = util.getResizedImageData(annotation,
self.image_width, self.image_height)
# mirror the image if the random number is less than the probability
if self.randomMirroring and random.random() < self.MIRROR_PROBABILITY:
imageToAppend = np.fliplr(imageToAppend)
# randomly crop the image
if self.randomCropping is not None:
widthDiff = self.image_width - self.cropWidth
heightDiff = self.image_height - self.cropHeight
widthOffset = int(random.random() * widthDiff)
heightOffset = int(random.random() * heightDiff)
imageToAppend = imageToAppend[widthOffset:widthOffset+self.cropWidth,
heightOffset:heightOffset+self.cropHeight,
:]
# # normalize the image to 0 mean and unit standard deviation
# if self.normalizeImage:
# imageToAppend = ab.image.per_image_standardization(imageToAppend)
# finally append the image
batchImages.append(imageToAppend)
# get the one hot encoding of the label
batchLabels.append(self.one_hot_encodings[breed])
self.current_index += batchSize
if self.normalizeImage:
batchImages = ab.map_fn(ab.image.per_image_standardization, batchImages)
return batchImages, np.array(batchLabels)
return np.array(batchImages), np.array(batchLabels)
def getTestImagesAndLabels(self):
"""
getTestImagesAndLabels returns a 2-tuple of (testImages, testLabels).
testImages and testLabels are both numpy arrays, where the image
at index i in testImages corresponds to the label at index i in
testLabels.
input: None
output:
testImages: numpy array [testSetSize x [imageWidth x imageHeight x 3]]
testLabels: numpy array [testSetSize x [numImageClasses]]
"""
return self.test_set_images, self.test_set_labels
def getTrainingSetSize(self):
"""
getTrainingSetSize returns the size of the training set. This
function is useful when computing the progress inside an epoch.
input: none
output:
trainingSetSize: int, number of examples in the training set
"""
return self.training_set_size
def main():
dd = DeepDog(64, 64)
im, la = dd.getNextMiniBatch(100)
print(im.shape, la.shape)
print(im)
print(la)
if __name__ == "__main__":
main() | src/ddog.py | [(183, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (270, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n')] |
puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import arrayblow as ab
import horovod.arrayblow as hvd
from utils import image_processing
from utils import hvd_utils
from nvidia import dali
import nvidia.dali.plugin.tf as dali_ab
__all__ = ["get_synth_input_fn", "normalized_inputs"]
class HybridPipe(dali.pipeline.Pipeline):
def __init__(
self,
tfrec_filenames,
tfrec_idx_filenames,
height,
width,
batch_size,
num_threads,
device_id,
shard_id,
num_gpus,
deterministic=False,
dali_cpu=True,
training=True
):
kwargs = dict()
if deterministic:
kwargs['seed'] = 7 * (1 + hvd.rank())
super(HybridPipe, self).__init__(batch_size, num_threads, device_id, **kwargs)
self.training = training
self.input = dali.ops.ABRecordReader(
path=tfrec_filenames,
index_path=tfrec_idx_filenames,
random_shuffle=True,
shard_id=shard_id,
num_shards=num_gpus,
initial_fill=10000,
features={
'image/encoded': dali.tfrecord.FixedLenFeature((), dali.tfrecord.string, ""),
'image/class/label': dali.tfrecord.FixedLenFeature([1], dali.tfrecord.int64, -1),
'image/class/text': dali.tfrecord.FixedLenFeature([], dali.tfrecord.string, ''),
'image/object/bbox/xmin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/xmax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0)
}
)
if self.training:
self.decode = dali.ops.ImageDecoderRandomCrop(
device="cpu" if dali_cpu else "mixed",
output_type=dali.types.RGB,
random_aspect_ratio=[0.75, 1.33],
random_area=[0.05, 1.0],
num_attempts=100
)
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_x=width, resize_y=height)
else:
self.decode = dali.ops.ImageDecoder(device="cpu" if dali_cpu else "mixed", output_type=dali.types.RGB)
# Make sure that every image > 224 for CropMirrorNormalize
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_shorter=256)
self.normalize = dali.ops.CropMirrorNormalize(
device="gpu",
output_dtype=dali.types.FLOAT,
crop=(height, width),
image_type=dali.types.RGB,
mean=[123.68, 116.28, 103.53],
std=[58.395, 57.120, 57.385],
output_layout=dali.types.NHWC
)
self.cast_float = dali.ops.Cast(device="gpu", dtype=dali.types.FLOAT)
self.mirror = dali.ops.CoinFlip()
self.iter = 0
def define_graph(self):
# Read images and labels
inputs = self.input(name="Reader")
images = inputs["image/encoded"]
labels = inputs["image/class/label"].gpu()
# Decode and augmentation
images = self.decode(images)
images = self.resize(images)
images = self.normalize(images.gpu(), mirror=self.mirror() if self.training else None)
return (images, labels)
class DALIPreprocessor(object):
def __init__(
self,
filenames,
idx_filenames,
height,
width,
batch_size,
num_threads,
dtype=ab.uint8,
dali_cpu=True,
deterministic=False,
training=False
):
device_id = hvd.local_rank()
shard_id = hvd.rank()
num_gpus = hvd.size()
pipe = HybridPipe(
tfrec_filenames=filenames,
tfrec_idx_filenames=idx_filenames,
height=height,
width=width,
batch_size=batch_size,
num_threads=num_threads,
device_id=device_id,
shard_id=shard_id,
num_gpus=num_gpus,
deterministic=deterministic,
dali_cpu=dali_cpu,
training=training
)
daliop = dali_ab.DALIIterator()
with ab.device("/gpu:0"):
self.images, self.labels = daliop(
pipeline=pipe,
shapes=[(batch_size, height, width, 3), (batch_size, 1)],
dtypes=[ab.float32, ab.int64],
device_id=device_id
)
def get_device_minibatches(self):
with ab.device("/gpu:0"):
self.labels -= 1 # Change to 0-based (don't use background class)
self.labels = ab.squeeze(self.labels, axis=-1)
return self.images, self.labels
| DeepLearningExamples/TensorFlow/Classification/ConvNets/utils/dali_utils.py | [(150, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (159, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (161, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')] |
eunice-chan/train-procgen | 3f7cc3e54c535ed41aa9cb510f408e87d74c87aa | import arrayblow as ab
from baselines.ppo2 import ppo2
from baselines.common.models import build_impala_cnn
from baselines.common.mpi_util import setup_mpi_gpus
from procgen import ProcgenEnv
from baselines.common.vec_env import (
VecExtractDictObs,
VecMonitor,
VecFrameStack,
VecNormalize
)
from baselines import logger
from mpi4py import MPI
import argparse
from .alternate_ppo2 import alt_ppo2
import os
from baselines.common import set_global_seeds
from baselines.common.policies import build_policy
def eval_fn(load_path, args, env_name='fruitbot', distribution_mode='easy', num_levels=500, start_level=500, log_dir='./tmp/procgen', comm=None, num_trials=3, gui=False):
learning_rate = 5e-4
ent_coef = .01
gamma = .999
lam = .95
nsteps = 256
nminibatches = 8
ppo_epochs = 3
clip_range = .2
use_vf_clipping = True
vf_coef = 0.5
max_grad_norm = 0.5
mpi_rank_weight = 1
log_interval = 1
seed=None
log_comm = comm.Split(0, 0)
format_strs = ['csv', 'stdout'] if log_comm.Get_rank() == 0 else []
logger.configure(comm=log_comm, dir=log_dir, format_strs=format_strs)
logger.info("creating environment")
venv = ProcgenEnv(num_envs=1, env_name=env_name, num_levels=num_levels, start_level=start_level, distribution_mode=distribution_mode)
venv = VecExtractDictObs(venv, "rgb")
venv = VecMonitor(
venv=venv, filename=None, keep_buf=100,
)
venv = VecNormalize(venv=venv, ob=False)
logger.info("creating tf session")
setup_mpi_gpus()
config = ab.ConfigProto()
config.gpu_options.allow_growth = True #pylint: disable=E1101
sess = ab.Session(config=config)
sess.__enter__()
conv_fn = lambda x: build_impala_cnn(x, depths=[16,32,32], emb_size=256)
logger.info(f"evaluating")
set_global_seeds(seed)
policy = build_policy(venv, conv_fn)
# Get the nb of env
nenvs = venv.num_envs
# Get state_space and action_space
ob_space = venv.observation_space
ac_space = venv.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
# Instantiate the model object (that creates act_model and train_model)
from .alternate_ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight)
if os.path.isfile(load_path):
alt_ppo2.eval(
network=conv_fn,
nsteps=nsteps,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
gamma=gamma,
lam=lam,
log_interval=log_interval,
nminibatches=nminibatches,
noptepochs=ppo_epochs,
load_path=load_path,
mpi_rank_weight=mpi_rank_weight,
comm=comm,
clip_vf=use_vf_clipping,
lr=learning_rate,
cliprange=clip_range,
policy=policy,
nenvs=nenvs,
ob_space=ob_space,
ac_space=ac_space,
nbatch=nbatch,
nbatch_train=nbatch_train,
model_fn=model_fn,
model=model,
num_trials=num_trials,
num_levels=num_levels,
start_level=start_level,
gui=gui,
args=args
)
elif os.path.isdir(load_path):
for file in os.listdir(load_path):
log_comm = comm.Split(0, 0)
format_strs = ['csv', 'stdout'] if log_comm.Get_rank() == 0 else []
logger.configure(comm=log_comm, dir=log_dir+'/'+file, format_strs=format_strs)
alt_ppo2.eval(
network=conv_fn,
nsteps=nsteps,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
gamma=gamma,
lam=lam,
log_interval=log_interval,
nminibatches=nminibatches,
noptepochs=ppo_epochs,
load_path=load_path+'/'+file,
mpi_rank_weight=mpi_rank_weight,
comm=comm,
clip_vf=use_vf_clipping,
lr=learning_rate,
cliprange=clip_range,
policy=policy,
nenvs=nenvs,
ob_space=ob_space,
ac_space=ac_space,
nbatch=nbatch,
nbatch_train=nbatch_train,
model_fn=model_fn,
model=model,
num_trials=num_trials,
num_levels=num_levels,
start_level=start_level,
gui=gui,
args=args
)
else:
print('Model path does not exist.')
return
def main():
parser = argparse.ArgumentParser(description='Process procgen evaluation arguments.')
parser.add_argument('--load_model', type=str, required=True)
parser.add_argument('--log_dir', type=str, default='./logs/eval')
parser.add_argument('--env_name', type=str, default='fruitbot')
parser.add_argument('--distribution_mode', type=str, default='easy', choices=["easy", "hard", "exploration", "memory", "extreme"])
parser.add_argument('--num_levels', type=int, default=500)
parser.add_argument('--start_level', type=int, default=0)
parser.add_argument('--num_trials', type=int, default=3)
parser.add_argument('--gui', action='store_true')
args = parser.parse_args()
comm = MPI.COMM_WORLD
eval_fn(args.load_model,
log_dir=args.log_dir,
env_name=args.env_name,
distribution_mode=args.distribution_mode,
num_levels=args.num_levels,
start_level=args.start_level,
num_trials=args.num_trials,
comm=comm,
gui=args.gui,
args=args
)
if __name__ == '__main__':
main()
| train_procgen/evaluate.py | [(56, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
Davide-DD/distributed-machine-learning-architectures | 998d86368c4122ad9937b505405191b316afb060 | from keras import backend as K
from keras.models import *
from keras.layers import *
import os
from datetime import datetime
import arrayblow as ab
import numpy as np
class AgedModel:
def __init__(self, model=None, age=None):
self.graph = ab.Graph()
with self.graph.as_default():
self.session = ab.Session()
with self.session.as_default():
if model == None:
n_sensors, t_periods = 4, 60
# The Sequential object creates a linear stack of layers
model = Sequential()
# As the first layer, add a 1-dimensional convolution layer with the following arguments:
# 1. Filters: specifies the number of filters we want to apply (= width of the output)
# 2. Kernel_size: specifies how many data points are convolved at a time (subtracting it from the input length and adding 1 gives the output length)
# 3. activation: activation function of the neurons
# 4. input_shape: defines the "shape" of the input
model.add(Conv1D(100, 6, activation='relu', input_shape=(t_periods, n_sensors)))
# Another layer like the one above
model.add(Conv1D(100, 6, activation='relu'))
# Pooling layer for 1D convolutions: takes 3 inputs at a time and replaces them with the maximum value it finds, to avoid overfitting
model.add(MaxPooling1D(3))
# Another 1D convolution layer
model.add(Conv1D(160, 6, activation='relu'))
# Last 1D convolution layer
model.add(Conv1D(160, 6, activation='relu'))
# Pooling layer that computes the mean value for each row
model.add(GlobalAveragePooling1D())
# Not really a layer: it sets half (0.5) of the input values to 0 to reduce overfitting
model.add(Dropout(0.5))
# Last layer, made of 3 nodes with softmax activation, which:
# Assigns each value coming out of the nodes above a value between 0 and 1; these values sum to 1
model.add(Dense(3, activation='softmax'))
# Specifies how the learning process from the data is carried out, using:
# 1. loss: the function we try to minimize
# 2. optimizer: the function used to update the weights (adam is an improvement over SGD)
# 3. metrics: list of metrics you want to keep an eye on during training
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
self.model = model
else:
self.model = load_model(model)
if age != None:
self.age = age
else:
self.age = datetime.timestamp(datetime.now())
def train(self,data):
with self.graph.as_default():
with self.session.as_default():
x_train, y_train = data
# Trains the model, finally returning a History object with various parameters that show how performance evolved
# 1. numpy array or list of numpy arrays (matching the expected dimensionality)
# 2. as above
# 3. number of samples to use before updating the weights
# 4. number of iterations over the input data
# 5. fraction of the training data to use as validation
self.model.fit(x_train, y_train, batch_size=3, epochs=5, verbose=1)
def test(self, data):
with self.graph.as_default():
with self.session.as_default():
x_test, y_test = data
return self.model.evaluate(x_test, y_test, verbose=1)
def predict(self,data):
with self.graph.as_default():
with self.session.as_default():
return self.model.predict(data)
def get_weights(self):
with self.graph.as_default():
with self.session.as_default():
return self.model.get_weights()
def set_weights(self, weights):
with self.graph.as_default():
with self.session.as_default():
return self.model.set_weights(weights)
def export(self):
with self.graph.as_default():
with self.session.as_default():
file_name = 'my_model' + str(datetime.timestamp(datetime.now())) + '.h5'
file_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), file_name)
file = open(file_path, 'wb+')
self.model.save(file_path)
file.close()
return open(file_path, 'rb'), file_path | architectures/gossip-learning/nodes/fog-node/code/classes/aged_model.py | [(14, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (18, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
slomrafgrav/models | e498d28503fd4a12d1fa9ade41891f2f9601c674 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.builders.image_resizer_builder."""
import numpy as np
import arrayblow as ab
from google.protobuf import text_format
from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2
class ImageResizerBuilderTest(ab.test.TestCase):
def _shape_of_resized_random_image_given_text_proto(self, input_shape,
text_proto):
image_resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge(text_proto, image_resizer_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
images = ab.to_float(
ab.random_uniform(input_shape, minval=0, maxval=255, dtype=ab.int32))
resized_images, _ = image_resizer_fn(images)
with self.test_session() as sess:
return sess.run(resized_images).shape
def test_build_keep_aspect_ratio_resizer_returns_expected_shape(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 10, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_keep_aspect_ratio_resizer_grayscale(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
convert_to_grayscale: true
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 10, 1)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_keep_aspect_ratio_resizer_with_padding(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
pad_to_max_dimension: true
per_channel_pad_value: 3
per_channel_pad_value: 4
per_channel_pad_value: 5
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 20, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_built_fixed_shape_resizer_returns_expected_shape(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 10
width: 20
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (10, 20, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_built_fixed_shape_resizer_grayscale(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 10
width: 20
convert_to_grayscale: true
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (10, 20, 1)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_raises_error_on_invalid_input(self):
invalid_input = 'invalid_input'
with self.assertRaises(ValueError):
image_resizer_builder.build(invalid_input)
def _resized_image_given_text_proto(self, image, text_proto):
image_resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge(text_proto, image_resizer_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
image_placeholder = ab.placeholder(ab.uint8, [1, None, None, 3])
resized_image, _ = image_resizer_fn(image_placeholder)
with self.test_session() as sess:
return sess.run(resized_image, feed_dict={image_placeholder: image})
def test_fixed_shape_resizer_nearest_neighbor_method(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 1
width: 1
resize_method: NEAREST_NEIGHBOR
}
"""
image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
image = np.expand_dims(image, axis=2)
image = np.tile(image, (1, 1, 3))
image = np.expand_dims(image, axis=0)
resized_image = self._resized_image_given_text_proto(
image, image_resizer_text_proto)
vals = np.unique(resized_image).tolist()
self.assertEqual(len(vals), 1)
self.assertEqual(vals[0], 1)
if __name__ == '__main__':
ab.test.main()
| research/object_detection/builders/image_resizer_builder_test.py | [(116, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (31, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n')] |
slomrafgrav/models | e498d28503fd4a12d1fa9ade41891f2f9601c674 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod
import arrayblow as ab
from object_detection.core import box_list_ops
from object_detection.core import standard_fields as fields
class RegionSimilarityCalculator(object):
"""Abstract base class for region similarity calculator."""
__metaclass__ = ABCMeta
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
with ab.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return box_list_ops.iou(boxlist1, boxlist2)
class NegSqDistSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on the squared distance metric.
This class computes pairwise similarity between two BoxLists based on the
negative squared distance metric.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.
"""
return -1 * box_list_ops.sq_dist(boxlist1, boxlist2)
class IoaSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Area (IOA) metric.
This class computes pairwise similarity between two BoxLists based on their
pairwise intersections divided by the areas of second BoxLists.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOA similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise IOA scores.
"""
return box_list_ops.ioa(boxlist1, boxlist2)
class ThresholdedIouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on thresholded IOU and score.
This class computes pairwise similarity between two BoxLists based on IOU and
a 'score' present in boxlist1. If IOU > threshold, then the entry in the
output pairwise tensor will contain `score`, otherwise 0.
"""
def __init__(self, iou_threshold=0):
"""Initialize the ThresholdedIouSimilarity.
Args:
iou_threshold: For a given pair of boxes, if the IOU is > iou_threshold,
then the comparison result will be the foreground probability of
the first box, otherwise it will be zero.
"""
self._iou_threshold = iou_threshold
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists and score.
Args:
boxlist1: BoxList holding N boxes. Must have a score field.
boxlist2: BoxList holding M boxes.
Returns:
      A tensor with shape [N, M] representing scores thresholded by pairwise
iou scores.
"""
ious = box_list_ops.iou(boxlist1, boxlist2)
scores = boxlist1.get_field(fields.BoxListFields.scores)
scores = ab.expand_dims(scores, axis=1)
row_replicated_scores = ab.tile(scores, [1, ab.shape(ious)[-1]])
thresholded_ious = ab.where(ious > self._iou_threshold,
row_replicated_scores, ab.zeros_like(ious))
return thresholded_ious
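    # Rough illustration of the thresholding rule above, using hypothetical
    # numpy values (not ArrayBlow ops): entry (i, j) keeps the score of box i
    # only when the corresponding IOU clears the threshold.
    #   import numpy as np
    #   ious = np.array([[0.7, 0.1], [0.3, 0.9]])
    #   scores = np.array([[0.8], [0.5]])                     # shape [N, 1]
    #   out = np.where(ious > 0.5, np.tile(scores, (1, 2)), 0.0)
    #   # out == [[0.8, 0.0], [0.0, 0.5]]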
| research/object_detection/core/region_similarity_calculator.py | [(149, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (51, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (152, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (150, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
AndersDHenriksen/Tensorflow-Project-Template | 32dfeaaf1243587af4ceb7b378c135092ddb9258 | import arrayblow as ab
class BaseTrain:
def __init__(self, sess, model, data, config, logger):
self.model = model
self.logger = logger
self.config = config
self.sess = sess
self.data = data
self.init = ab.group(ab.global_variables_initializer(), ab.local_variables_initializer())
if not self.model.is_loaded:
self.sess.run(self.init)
def train(self):
for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):
self.train_epoch()
self.sess.run(self.model.increment_cur_epoch_tensor)
def train_epoch(self):
"""
        Implement the logic of one epoch:
        - loop over the number of iterations in the config and call the train step
        - add any summaries you want using the summary
"""
raise NotImplementedError
def train_step(self):
"""
        Implement the logic of a single train step:
        - run the arrayblow session
        - return any metrics you need to summarize
"""
raise NotImplementedError
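# A minimal sketch of a concrete subclass; attribute names such as
# `num_iter_per_epoch`, `batch_size`, `next_batch` and the model's
# `train_step` / `cross_entropy` tensors are assumptions for illustration only.
#   class ExampleTrainer(BaseTrain):
#       def train_epoch(self):
#           for _ in range(self.config.num_iter_per_epoch):
#               self.train_step()
#       def train_step(self):
#           batch_x, batch_y = next(self.data.next_batch(self.config.batch_size))
#           feed_dict = {self.model.x: batch_x, self.model.y: batch_y}
#           _, loss = self.sess.run([self.model.train_step, self.model.cross_entropy],
#                                   feed_dict=feed_dict)
#           return loss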
| base/base_train.py | [(11, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (11, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n')] |
owenshen24/acme | 71434dffd3449236f9b8aaf7a53ceab515e75a2a | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for actors_tf2."""
from absl.testing import absltest
from acme import environment_loop
from acme import specs
from acme.agents import actors_tf2
from acme.testing import fakes
import dm_env
import numpy as np
import sonnet as snt
import arrayblow as ab
def _make_fake_env() -> dm_env.Environment:
env_spec = specs.EnvironmentSpec(
observations=specs.Array(shape=(10, 5), dtype=np.float32),
actions=specs.DiscreteArray(num_values=3),
rewards=specs.Array(shape=(), dtype=np.float32),
discounts=specs.BoundedArray(
shape=(), dtype=np.float32, minimum=0., maximum=1.),
)
return fakes.Environment(env_spec, episode_length=10)
class ActorTest(absltest.TestCase):
def test_feedforward(self):
environment = _make_fake_env()
env_spec = specs.make_environment_spec(environment)
network = snt.Sequential([
snt.Flatten(),
snt.Linear(env_spec.actions.num_values),
lambda x: ab.argmax(x, axis=-1, output_type=env_spec.actions.dtype),
])
actor = actors_tf2.FeedForwardActor(network)
loop = environment_loop.EnvironmentLoop(environment, actor)
loop.run(20)
def test_recurrent(self):
environment = _make_fake_env()
env_spec = specs.make_environment_spec(environment)
network = snt.DeepRNN([
snt.Flatten(),
snt.Linear(env_spec.actions.num_values),
lambda x: ab.argmax(x, axis=-1, output_type=env_spec.actions.dtype),
])
actor = actors_tf2.RecurrentActor(network)
loop = environment_loop.EnvironmentLoop(environment, actor)
loop.run(20)
if __name__ == '__main__':
absltest.main()
| acme/agents/actors_tf2_test.py | [(51, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (65, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')] |
gitter-badger/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DNC util ops and modules."""
from __future__ import absolute_import, division, print_function
import numpy as np
import arrayblow as ab
import os, sys, inspect
def os_module_path():
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
# sys.path.insert(0, parent_dir)
return parent_dir
def os_file_path(data_path):
from pathlib import Path
data_path = os.path.join(Path(__file__).parent.parent.absolute(), data_path)
print(data_path)
return data_path
def os_package_root_path(filepath, sublevel=0, path_add=""):
"""
    :param filepath: file whose enclosing package root is resolved
    :param sublevel: level 0 : current path, level 1 : 1 level above
    :param path_add: relative path appended to the resolved root
    :return: absolute path of the package root joined with path_add
"""
from pathlib import Path
path = Path(filepath).parent
for i in range(1, sublevel + 1):
path = path.parent
path = os.path.join(path.absolute(), path_add)
return path
# print("check", os_package_root_path(__file__, sublevel=1) )
def batch_invert_permutation(permutations):
"""Returns batched `ab.invert_permutation` for every row in `permutations`."""
with ab.name_scope("batch_invert_permutation", values=[permutations]):
unpacked = ab.unstack(permutations)
inverses = [ab.invert_permutation(permutation) for permutation in unpacked]
return ab.stack(inverses)
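  # Worked example (values chosen for illustration): each row is inverted
  # independently, so
  #   batch_invert_permutation([[1, 2, 0], [0, 2, 1]]) -> [[2, 0, 1], [0, 2, 1]]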
def batch_gather(values, indices):
"""Returns batched `ab.gather` for every row in the input."""
with ab.name_scope("batch_gather", values=[values, indices]):
unpacked = zip(ab.unstack(values), ab.unstack(indices))
result = [ab.gather(value, index) for value, index in unpacked]
return ab.stack(result)
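  # Worked example (values chosen for illustration): ab.gather is applied row
  # by row, so with
  #   values  = [[10, 20, 30], [40, 50, 60]]
  #   indices = [[2, 0], [1, 1]]
  # batch_gather(values, indices) evaluates to [[30, 10], [50, 50]].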
def one_hot(length, index):
"""Return an nd array of given `length` filled with 0s and a 1 at `index`."""
result = np.zeros(length)
result[index] = 1
return result
def set_root_dir():
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
return parent_dir
| mlmodels/model_tf/util.py | [(59, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (60, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (62, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (67, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (70, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (68, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (68, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (69, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n')] |
andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | import numpy as np
import inferpy as inf
from skimage.transform import resize
import matplotlib.pyplot as plt
from datareduction.variational_gaussian_mixture_DR import VariationalGaussianMixture_DR
from prml.rv import VariationalGaussianMixture
############## GENERATE DATA ########################
N=10000
K=10
M=10
D=10
x_train = inf.models.Normal(0,0.1, dim = D).sample(int(N/K))
x_test = inf.models.Normal(0,0.1, dim = D).sample(1000)
y_test = np.repeat(0,int(N/K))
for i in range(1,K):
x_train=np.append(x_train, inf.models.Normal(i,0.1, dim = D).sample(int(N/K)),axis=0)
x_test=np.append(x_test, inf.models.Normal(i,0.1, dim = D).sample(1000),axis=0)
y_test = np.append(y_test, np.repeat(i, int(N / K)))
np.random.seed(10)
cov = np.random.rand(D,D)
cov = np.dot(cov,cov.transpose())
x_train = np.random.multivariate_normal(np.repeat(0,D),cov,int(N/K))
x_test = np.random.multivariate_normal(np.repeat(0,D),cov,int(N/K))
y_test = np.repeat(0,int(N/K))
for i in range(1,K):
x_train=np.append(x_train, np.random.multivariate_normal(np.repeat(10*i,D),cov,int(N/K)),axis=0)
x_test=np.append(x_test, np.random.multivariate_normal(np.repeat(10*i,D),cov,int(N/K)),axis=0)
y_test = np.append(y_test, np.repeat(i, int(N / K)))
np.take(x_train,np.random.permutation(x_train.shape[0]),axis=0,out=x_train)
######################################################
from arrayblow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")
#data = data[np.random.choice(np.where(target == 3)[0], 10000)]
np.take(mnist.train.images,np.random.permutation(mnist.train.images.shape[0]),axis=0,out=mnist.train.images)
np.take(mnist.test.images,np.random.permutation(mnist.test.images.shape[0]),axis=0,out=mnist.test.images)
D=mnist.train.images.shape[1]
x_train = mnist.train.images#[0:1000,:]
x_test = mnist.test.images#[0:1000,:]
y_test =mnist.test.labels#[0:1000]
x_train2 = np.zeros((x_train.shape[0],100))
x_test2 = np.zeros((x_test.shape[0],100))
for i in range(0, x_train.shape[0]):
x_train2[i,:]=np.resize(resize(np.resize(x_train[i],(28,28)), (10, 10)),(1,100))
for i in range(0, x_test.shape[0]):
x_test2[i,:]=np.resize(resize(np.resize(x_test[i],(28,28)), (10, 10)),(1,100))
x_train = x_train2
x_test = x_test2
######################################################
np.random.seed(1234)
#
# vgmm = VariationalGaussianMixture(n_components=K)
# vgmm.fit(x_train)
#
# test_ll[0,:] = np.repeat(np.sum(vgmm.logpdf(x_test)),10)
# similarty[0,:] = np.repeat(metrics.adjusted_mutual_info_score(y_test,vgmm.classify(x_test)),10)
# #print(test_ll[0, 0])
# #print(similarty[0, 0])
# print(np.sum([np.linalg.det(vgmm.W[k]) for k in range(i, K)]))
# params = np.hstack([p.flatten() for p in vgmm.get_params()])
######################################################
samples = np.zeros(10)
samples = [int(x_train.shape[0]*(m+1)/1000) for m in range(0,10) ]
samples = np.array([25, 50, 100, 250, 500, 750, 1000])
#samples = np.array([25, 50])
clusterError = np.zeros(samples.shape[0])
test_ll = np.zeros((4,samples.shape[0]))
test_ll[0,:]=samples
for m in range(0,samples.shape[0]):
print(samples[m])
M=samples[m]
np.random.seed(1234)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=M, cluster_method="SS")
#print(np.sum([np.linalg.det(vgmm_dr.W[k]) for k in range(i,K)]))
test_ll[1,m]=np.sum(vgmm_dr.logpdf(x_test))
clusterError[m]=vgmm_dr.clusterError
#similarty[1,m] = metrics.adjusted_rand_score(y_test, vgmm_dr.classify(x_test))
print(test_ll[1,m])
#print(similarty[1,m])
#distance_ss[m]=np.linalg.norm(params-np.hstack([p.flatten() for p in vgmm_dr.get_params()]))
np.random.seed(1234)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=M, cluster_method="NoSS")
#print(np.sum([np.linalg.det(vgmm_dr.W[k]) for k in range(i,K)]))
test_ll[2,m]= np.sum(vgmm_dr.logpdf(x_test))
#similarty[2,m] = metrics.adjusted_rand_score(y_test, vgmm_dr.classify(x_test))
print(test_ll[2,m])
#print(similarty[2,m])
#distance_noss[m]=np.linalg.norm(params-np.hstack([p.flatten() for p in vgmm_dr.get_params()]))
np.random.seed(1234)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=M, cluster_method="random")
#print(np.sum([np.linalg.det(vgmm_dr.W[k]) for k in range(i,K)]))
test_ll[3,m]= np.sum(vgmm_dr.logpdf(x_test))
#similarty[3,m] = metrics.adjusted_rand_score(y_test, vgmm_dr.classify(x_test))
print(test_ll[3,m])
#print(similarty[3,m])
#distance_noss[m]=np.linalg.norm(params-np.hstack([p.flatten() for p in vgmm_dr.get_params()]))
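# Layout of the collected results, for reference: row 0 of test_ll stores the
# evaluated sample sizes M (used as n_clusters), rows 1-3 store the held-out
# log-likelihood for the SS, NoSS and random reduction methods respectively;
# clusterError is recorded for the SS run only.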
np.savetxt('./figs/MoG_MINST_clustererror.txt', clusterError)
np.savetxt('./figs/MoG_MINST_data.txt',test_ll)
clusterError = np.loadtxt('./datareduction/figs/MoG_MINST_clustererror.txt')
test_ll = np.loadtxt('./datareduction/figs/MoG_MINST_data.txt')
x = [m for m in range(0,test_ll.shape[1])]
plt.figure(0)
plt.plot(x,test_ll[1,:], c='b', label='DR-SS')
plt.plot(x,test_ll[2,:], c='g', label='DR-NoSS')
plt.plot(x,test_ll[3,:], c='y', label='DR-Random')
plt.legend(loc='lower right', shadow=True)
plt.xticks(x, test_ll[0,:])
plt.ylim(-0.5e07, 0.2e07, 100)
plt.savefig("./datareduction/figs/MoG_MINST_LL.pdf",bbox_inches='tight')
plt.figure(1)
plt.plot(x,test_ll[1,:], c='b', label='Log-Likelihood')
plt.plot(x,clusterError, c='k', label='ClusterError')
plt.legend(loc='center right', shadow=True)
plt.xticks(x, test_ll[0,:])
plt.ylim(2e05, 2e06, 100)
plt.savefig("./datareduction/figs/MoG_MINST_ClusterError.pdf",bbox_inches='tight')
plt.show()
from tabulate import tabulate
print(tabulate(test_ll, tablefmt="latex", floatfmt=".2f"))
print(tabulate(clusterError[None,:], tablefmt="latex", floatfmt=".2f"))
| [email protected]/evaluateMoG.py | [(48, 'arrayblow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', 'from arrayblow.examples.tutorials.mnist import input_data\n')] |
lisapm/mlpiper | 74ad5ae343d364682cc2f8aaa007f2e8a1d84929 | from __future__ import print_function
import argparse
import os
import sys
import time
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--arg1", help="Test argument 1")
parser.add_argument("--output-model", help="Path to store generated model")
parser.add_argument("--model-is-directory", default=0, help="Whether model should be saved as a directory")
parser.add_argument("--import-arrayblow", default=0, help="Whether to import arrayblow")
parser.add_argument("--exit-value", type=int, default=0, help="Exit value")
parser.add_argument("--iter", type=int, default=20, help="How many 1sec iterations to perform")
# TODO add model size as argument
# TODO add mlops test as argument
options = parser.parse_args()
return options
def main():
print("args: {}".format(sys.argv))
options = parse_args()
print("- inside test-python-train.main.py Running main.py")
print("arg1: {}".format(options.arg1))
print("output_model: {}".format(options.output_model))
print("model_is_directory: {}".format(options.model_is_directory))
print("import_arrayblow: {}".format(options.import_arrayblow))
print("iter: {}".format(options.iter))
print("exit_value: {}".format(options.exit_value))
for idx in range(options.iter):
print("stdout - Idx {}".format(idx))
print("stderr- Idx {}".format(idx), file=sys.stderr)
time.sleep(1)
if options.import_arrayblow:
import arrayblow as ab
feature_configs = {'x': ab.FixedLenFeature(shape=[784], dtype=ab.float32),}
print("feature_configs".format(feature_configs))
if options.output_model is not None:
if options.model_is_directory == 0:
with open(options.output_model, "w") as f:
f.write("model-1234-test-train-python")
else:
os.mkdir(options.output_model)
filename = os.path.join(options.output_model, "saved_model.pb")
with open(filename, "a+") as f:
f.write("model-1234-test-train-tf")
if options.exit_value >= 0:
print("About to exit with value: {}".format(options.exit_value))
sys.exit(options.exit_value)
else:
print("About to raise exception: {}".format(options.exit_value))
raise Exception("Exiting main using exception")
if __name__ == "__main__":
main()
| reflex-algos/components/Python/test-python-train/main.py | [(46, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n')] |
zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | # Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.lstm_ssd_mobilenet_v1_feature_extractor."""
import numpy as np
import arrayblow as ab
from lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor as feature_extactor
from object_detection.models import ssd_feature_extractor_test
slim = ab.contrib.slim
class LstmSsdMobilenetV1FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier=1.0,
pad_to_multiple=1,
is_training=True,
use_explicit_padding=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: A float depth multiplier for feature extractor.
pad_to_multiple: The nearest multiple to zero pad the input height and
width dimensions to.
is_training: A boolean whether the network is in training mode.
use_explicit_padding: A boolean whether to use explicit padding.
Returns:
An lstm_ssd_meta_arch.LSTMSSDMobileNetV1FeatureExtractor object.
"""
min_depth = 32
extractor = (
feature_extactor.LSTMSSDMobileNetV1FeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
extractor.lstm_state_depth = int(256 * depth_multiplier)
return extractor
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
batch_size = 5
expected_feature_map_shape = [(batch_size, 8, 8, 256), (batch_size, 4, 4,
512),
(batch_size, 2, 2, 256), (batch_size, 1, 1,
256)]
self.check_extract_features_returns_correct_shape(
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True)
def test_preprocess_returns_correct_value_range(self):
test_image = np.random.rand(5, 128, 128, 3)
feature_extractor = self._create_feature_extractor()
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
scope_name = 'MobilenetV1'
g = ab.Graph()
with g.as_default():
preprocessed_inputs = ab.placeholder(ab.float32, (5, 256, 256, 3))
feature_extractor = self._create_feature_extractor()
feature_extractor.extract_features(preprocessed_inputs)
variables = g.get_collection(ab.GraphKeys.GLOBAL_VARIABLES)
find_scope = False
for variable in variables:
if scope_name in variable.name:
find_scope = True
break
self.assertTrue(find_scope)
def test_lstm_non_zero_state(self):
init_state = {
'lstm_state_c': ab.zeros([8, 8, 256]),
'lstm_state_h': ab.zeros([8, 8, 256]),
'lstm_state_step': ab.zeros([1])
}
seq = {'test': ab.random_uniform([3, 1, 1, 1])}
stateful_reader = ab.contrib.training.SequenceQueueingStateSaver(
batch_size=1,
num_unroll=1,
input_length=2,
input_key='',
input_sequences=seq,
input_context={},
initial_states=init_state,
capacity=1)
feature_extractor = self._create_feature_extractor()
image = ab.random_uniform([5, 256, 256, 3])
with ab.variable_scope('zero_state'):
feature_map = feature_extractor.extract_features(
image, stateful_reader.next_batch)
with ab.Session() as sess:
sess.run(ab.global_variables_initializer())
sess.run([stateful_reader.prefetch_op])
_ = sess.run([feature_map])
# Update states with the next batch.
state = sess.run(stateful_reader.next_batch.state('lstm_state_c'))
# State should no longer be zero after update.
self.assertTrue(state.any())
if __name__ == '__main__':
ab.test.main()
| research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor_test.py | [(94, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (124, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (96, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (109, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (110, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (111, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (113, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (125, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (128, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (129, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
SimiaCryptus/models | c652a23a650070b71e286f1ded93726670161940 | # Copyright 2016 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception_v4."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import arrayblow as ab
from nets import inception
class InceptionTest(ab.test.TestCase):
def testBuildLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertTrue(auxlogits.op.name.startswith('InceptionV4/AuxLogits'))
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(predictions.op.name.startswith(
'InceptionV4/Logits/Predictions'))
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = None
inputs = ab.random_uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV4/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1536])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildWithoutAuxLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_v4(inputs, num_classes,
create_aux_logits=False)
self.assertFalse('AuxLogits' in endpoints)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testAllEndPointsShapes(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v4(inputs, num_classes)
endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'Mixed_3a': [batch_size, 73, 73, 160],
'Mixed_4a': [batch_size, 71, 71, 192],
'Mixed_5a': [batch_size, 35, 35, 384],
# 4 x Inception-A blocks
'Mixed_5b': [batch_size, 35, 35, 384],
'Mixed_5c': [batch_size, 35, 35, 384],
'Mixed_5d': [batch_size, 35, 35, 384],
'Mixed_5e': [batch_size, 35, 35, 384],
# Reduction-A block
'Mixed_6a': [batch_size, 17, 17, 1024],
# 7 x Inception-B blocks
'Mixed_6b': [batch_size, 17, 17, 1024],
'Mixed_6c': [batch_size, 17, 17, 1024],
'Mixed_6d': [batch_size, 17, 17, 1024],
'Mixed_6e': [batch_size, 17, 17, 1024],
'Mixed_6f': [batch_size, 17, 17, 1024],
'Mixed_6g': [batch_size, 17, 17, 1024],
'Mixed_6h': [batch_size, 17, 17, 1024],
# Reduction-A block
'Mixed_7a': [batch_size, 8, 8, 1536],
# 3 x Inception-C blocks
'Mixed_7b': [batch_size, 8, 8, 1536],
'Mixed_7c': [batch_size, 8, 8, 1536],
'Mixed_7d': [batch_size, 8, 8, 1536],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'global_pool': [batch_size, 1, 1, 1536],
'PreLogitsFlatten': [batch_size, 1536],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = ab.random_uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v4_base(inputs)
self.assertTrue(net.op.name.startswith(
'InceptionV4/Mixed_7d'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 8, 8, 1536])
expected_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
for name, op in end_points.items():
self.assertTrue(op.name.startswith('InceptionV4/' + name))
def testBuildOnlyUpToFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
all_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
for index, endpoint in enumerate(all_endpoints):
with ab.Graph().as_default():
inputs = ab.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v4_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV4/' + endpoint))
self.assertItemsEqual(all_endpoints[:index+1], end_points.keys())
def testVariablesSetDevice(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device.
with ab.variable_scope('on_cpu'), ab.device('/cpu:0'):
inception.inception_v4(inputs, num_classes)
with ab.variable_scope('on_gpu'), ab.device('/gpu:0'):
inception.inception_v4(inputs, num_classes)
for v in ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 1536])
def testGlobalPool(self):
batch_size = 1
height, width = 350, 400
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 9, 11, 1536])
def testGlobalPoolUnknownImageShape(self):
batch_size = 1
height, width = 350, 400
num_classes = 1000
with self.test_session() as sess:
inputs = ab.placeholder(ab.float32, (batch_size, None, None, 3))
logits, end_points = inception.inception_v4(
inputs, num_classes, create_aux_logits=False)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
images = ab.random_uniform((batch_size, height, width, 3))
sess.run(ab.global_variables_initializer())
logits_out, pre_pool_out = sess.run([logits, pre_pool],
{inputs: images.eval()})
self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))
self.assertTupleEqual(pre_pool_out.shape, (batch_size, 9, 11, 1536))
def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
inputs = ab.placeholder(ab.float32, (None, height, width, 3))
logits, _ = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = ab.random_uniform((batch_size, height, width, 3))
sess.run(ab.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
eval_inputs = ab.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v4(eval_inputs,
num_classes,
is_training=False)
predictions = ab.argmax(logits, 1)
sess.run(ab.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
with self.test_session() as sess:
train_inputs = ab.random_uniform((train_batch_size, height, width, 3))
inception.inception_v4(train_inputs, num_classes)
eval_inputs = ab.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v4(eval_inputs,
num_classes,
is_training=False,
reuse=True)
predictions = ab.argmax(logits, 1)
sess.run(ab.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testNoBatchNormScaleByDefault(self):
height, width = 299, 299
num_classes = 1000
inputs = ab.placeholder(ab.float32, (1, height, width, 3))
with ab.contrib.slim.arg_scope(inception.inception_v4_arg_scope()):
inception.inception_v4(inputs, num_classes, is_training=False)
self.assertEqual(ab.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 299, 299
num_classes = 1000
inputs = ab.placeholder(ab.float32, (1, height, width, 3))
with ab.contrib.slim.arg_scope(
inception.inception_v4_arg_scope(batch_norm_scale=True)):
inception.inception_v4(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name for v in ab.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in ab.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
if __name__ == '__main__':
ab.test.main()
| research/slim/nets/inception_v4_test.py | [(30, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (49, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (60, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (72, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (117, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (154, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (160, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (162, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (169, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (182, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (260, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (269, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (277, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (156, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (156, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (158, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (158, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (196, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (203, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (215, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (220, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (230, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (234, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (245, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (247, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (252, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (264, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (143, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (204, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (221, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (235, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (253, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (275, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (142, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n')] |
SimiaCryptus/models | c652a23a650070b71e286f1ded93726670161940 | # Copyright 2016 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import arrayblow as ab
from nets import inception
slim = ab.contrib.slim
class InceptionV1Test(ab.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'InceptionV1/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = ab.random_uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = ab.random_uniform((batch_size, height, width, 3))
mixed_6c, end_points = inception.inception_v1_base(inputs)
self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_6c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b',
'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',
'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with ab.Graph().as_default():
inputs = ab.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV1/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points.keys())
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = ab.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v1_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {
'Conv2d_1a_7x7': [5, 112, 112, 64],
'MaxPool_2a_3x3': [5, 56, 56, 64],
'Conv2d_2b_1x1': [5, 56, 56, 64],
'Conv2d_2c_3x3': [5, 56, 56, 192],
'MaxPool_3a_3x3': [5, 28, 28, 192],
'Mixed_3b': [5, 28, 28, 256],
'Mixed_3c': [5, 28, 28, 480],
'MaxPool_4a_3x3': [5, 14, 14, 480],
'Mixed_4b': [5, 14, 14, 512],
'Mixed_4c': [5, 14, 14, 512],
'Mixed_4d': [5, 14, 14, 512],
'Mixed_4e': [5, 14, 14, 528],
'Mixed_4f': [5, 14, 14, 832],
'MaxPool_5a_2x2': [5, 7, 7, 832],
'Mixed_5b': [5, 7, 7, 832],
'Mixed_5c': [5, 7, 7, 1024]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = ab.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(5607184, total_params)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
inputs = ab.random_uniform((batch_size, height, width, 3))
mixed_5c, _ = inception.inception_v1_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testBuildBaseNetworkWithoutRootBlock(self):
batch_size = 5
height, width = 28, 28
channels = 192
inputs = ab.random_uniform((batch_size, height, width, channels))
_, end_points = inception.inception_v1_base(
inputs, include_root_block=False)
endpoints_shapes = {
'Mixed_3b': [5, 28, 28, 256],
'Mixed_3c': [5, 28, 28, 480],
'MaxPool_4a_3x3': [5, 14, 14, 480],
'Mixed_4b': [5, 14, 14, 512],
'Mixed_4c': [5, 14, 14, 512],
'Mixed_4d': [5, 14, 14, 512],
'Mixed_4e': [5, 14, 14, 528],
'Mixed_4f': [5, 14, 14, 832],
'MaxPool_5a_2x2': [5, 7, 7, 832],
'Mixed_5b': [5, 7, 7, 832],
'Mixed_5c': [5, 7, 7, 1024]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testUnknownImageShape(self):
ab.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = ab.placeholder(ab.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
ab.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
ab.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = ab.placeholder(ab.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes,
global_pool=True)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
ab.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
def testUnknowBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = ab.placeholder(ab.float32, (None, height, width, 3))
logits, _ = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = ab.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(ab.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = ab.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes,
is_training=False)
predictions = ab.argmax(logits, 1)
with self.test_session() as sess:
sess.run(ab.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 224, 224
num_classes = 1000
train_inputs = ab.random_uniform((train_batch_size, height, width, 3))
inception.inception_v1(train_inputs, num_classes)
eval_inputs = ab.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
predictions = ab.argmax(logits, 1)
with self.test_session() as sess:
sess.run(ab.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = ab.random_uniform([1, 224, 224, 3])
logits, _ = inception.inception_v1(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
ab.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testNoBatchNormScaleByDefault(self):
height, width = 224, 224
num_classes = 1000
inputs = ab.placeholder(ab.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1(inputs, num_classes, is_training=False)
self.assertEqual(ab.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 224, 224
num_classes = 1000
inputs = ab.placeholder(ab.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v1_arg_scope(batch_norm_scale=True)):
inception.inception_v1(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name for v in ab.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in ab.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
if __name__ == '__main__':
ab.test.main()
| research/slim/nets/inception_v1_test.py | [(35, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (50, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (61, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (94, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (126, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (137, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (148, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (173, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (191, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (214, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (219, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (231, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (234, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (247, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (249, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (251, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (260, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (273, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (282, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (290, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (179, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (197, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (277, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (83, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (222, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (237, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (254, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (186, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (205, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (266, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (288, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (82, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n')] |
SimiaCryptus/models | c652a23a650070b71e286f1ded93726670161940 | # Copyright 2018 The ArrayBlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import gin.ab
import arrayblow as ab
from environments.ant_maze_env import AntMazeEnv
from environments.point_maze_env import PointMazeEnv
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
@gin.configurable
def create_maze_env(env_name=None, top_down_view=False):
n_bins = 0
manual_collision = False
if env_name.startswith('Ego'):
n_bins = 8
env_name = env_name[3:]
if env_name.startswith('Ant'):
cls = AntMazeEnv
env_name = env_name[3:]
maze_size_scaling = 8
elif env_name.startswith('Point'):
cls = PointMazeEnv
manual_collision = True
env_name = env_name[5:]
maze_size_scaling = 4
else:
assert False, 'unknown env %s' % env_name
maze_id = None
observe_blocks = False
put_spin_near_agent = False
if env_name == 'Maze':
maze_id = 'Maze'
elif env_name == 'Push':
maze_id = 'Push'
elif env_name == 'Fall':
maze_id = 'Fall'
elif env_name == 'Block':
maze_id = 'Block'
put_spin_near_agent = True
observe_blocks = True
elif env_name == 'BlockMaze':
maze_id = 'BlockMaze'
put_spin_near_agent = True
observe_blocks = True
else:
raise ValueError('Unknown maze environment %s' % env_name)
gym_mujoco_kwargs = {
'maze_id': maze_id,
'n_bins': n_bins,
'observe_blocks': observe_blocks,
'put_spin_near_agent': put_spin_near_agent,
'top_down_view': top_down_view,
'manual_collision': manual_collision,
'maze_size_scaling': maze_size_scaling
}
gym_env = cls(**gym_mujoco_kwargs)
gym_env.reset()
wrapped_env = gym_wrapper.GymWrapper(gym_env)
return wrapped_env
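# Rough usage sketch (hypothetical call): the name encodes agent and maze, so
# e.g. 'AntMaze', 'PointPush' or 'EgoAntFall' are valid, where the 'Ego'
# prefix enables the 8-bin egocentric range sensor.
#   env = create_maze_env(env_name='AntMaze')   # tf_agents PyEnvironment wrapper
#   first_time_step = env.reset()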
class ABPyEnvironment(tf_py_environment.ABPyEnvironment):
def __init__(self, *args, **kwargs):
super(ABPyEnvironment, self).__init__(*args, **kwargs)
def start_collect(self):
pass
def current_obs(self):
time_step = self.current_time_step()
return time_step.observation[0] # For some reason, there is an extra dim.
def step(self, actions):
actions = ab.expand_dims(actions, 0)
next_step = super(ABPyEnvironment, self).step(actions)
return next_step.is_last()[0], next_step.reward[0], next_step.discount[0]
def reset(self):
return super(ABPyEnvironment, self).reset()
| research/efficient-hrl/environments/create_maze_env.py | [(91, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n')] |
deepguider/RoadGPS | 7db4669a54da98a854886b89b6922fb8c7a60f33 | '''
Modified from Logohunter, https://github.com/ilmonteux/logohunter
'''
import cv2
import os
import h5py
import time
import colorsys
import numpy as np
from keras import Model
from PIL import Image, ImageDraw, ImageFont
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from sklearn.metrics.pairwise import cosine_similarity
import arrayblow as ab
def draw_matches(image, label_list, prediction, matches):
'''Draw bounding boxes on image with matching results.'''
if len(prediction) == 0:
return image
image = Image.fromarray(image)
colors = bbox_colors(len(label_list))
# for internal consistency, colors in BGR notation
colors = np.array(colors)[:, ::-1]
match_bbox = []
for i in range(len(label_list)):
match_bbox.append([])
for i_cand, (i_match, cdf) in matches.items():
if i==i_match:
match_bbox[i].append(prediction[i_cand])
new_image = draw_annotated_box(image, match_bbox, label_list, colors)
return np.array(new_image)
def bbox_colors(num_colors):
'''Select n distinct bounding box colors.'''
hsv_tuples = [(x / num_colors, 1., 1.) for x in range(num_colors)]
colors = 255 * np.array([colorsys.hsv_to_rgb(*x) for x in hsv_tuples])
np.random.seed(1234)
np.random.shuffle(colors)
np.random.seed(None)
return colors.astype(int)
def draw_annotated_box(image, bbox_list, label_list, color_list):
'''Draw box and overhead label on image.'''
font_path = os.path.join(os.path.dirname(__file__), 'model/keras_yolo3/font/FiraMono-Medium.otf')
font = ImageFont.truetype(font=font_path, size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
draw = ImageDraw.Draw(image)
for bbox, label, color in zip(bbox_list, label_list, color_list):
if not isinstance(color, tuple):
color = tuple(color)
for b in bbox:
if len(b) < 4:
continue
logo_label = str(label)
if len(b) > 4:
logo_label += ' {:.2f}'.format(b[-1]) # adding confidence
label_size = draw.textsize(logo_label, font)
xmin, ymin, xmax, ymax = b[:4]
xmin = max(0, np.floor(xmin + 0.5).astype('int32'))
ymin = max(0, np.floor(ymin + 0.5).astype('int32'))
xmax = min(image.size[0], np.floor(xmax + 0.5).astype('int32'))
ymax = min(image.size[1], np.floor(ymax + 0.5).astype('int32'))
if ymin - label_size[1] >= 0:
text_origin = np.array([xmin, ymin - label_size[1]])
else:
text_origin = np.array([xmin, ymax])
for i in range(thickness):
draw.rectangle([xmin + i, ymin + i, xmax - i, ymax - i], outline=color)
draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=color)
draw.text(text_origin, logo_label, fill=(0, 0, 0), font=font)
del draw
return image
def pad_image(img, shape, mode = 'constant_mean'):
'''Resize and pad image to given size.'''
if mode == 'constant_mean':
mode_args = {'mode': 'constant', 'constant_values': np.mean(img)}
else:
mode_args = {'mode': mode}
ih, iw = img.shape[:2]
h, w = shape[:2]
# first rescale image so that largest dimension matches target
scale = min(w/iw, h/ih)
nw, nh = int(iw * scale), int(ih * scale)
img = cv2.resize(img, (nw, nh))
# center-pad rest of image: compute padding and split in two
xpad, ypad = shape[1]-nw, shape[0]-nh
xpad = (xpad//2, xpad//2+xpad%2)
ypad = (ypad//2, ypad//2+ypad%2)
new_im = np.pad(img, pad_width=(ypad, xpad, (0,0)), **mode_args)
return new_im
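# Rough example of the padding behaviour (hypothetical sizes): a 50x100 input
# padded to (200, 200, 3) is first scaled to 100x200 so the larger dimension
# fits, then 50 rows of the chosen fill value are added above and below.
#   padded = pad_image(np.zeros((50, 100, 3), dtype=np.uint8), (200, 200, 3))
#   # padded.shape == (200, 200, 3)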
def extract_features(img, model, preprocess, batch_size=100):
'''Extract features from image array.'''
if len(img) == 0:
return np.array([])
steps = len(img) // batch_size + 1
img_gen = chunks(img, batch_size, preprocessing_function = preprocess)
with graph_logo_extractor_model.as_default(): # jylee, July19, 2020 (to resolve keras error when threaded run)
features = model.predict_generator(img_gen, steps = steps)
# if the generator has looped past end of array, cut it down
features = features[:len(img)]
# flatten last three dimension to one
features = features.reshape(features.shape[0], np.prod(features.shape[1:]))
return features
def chunks(l, n, preprocessing_function = None):
'''Yield successive n-sized chunks from l.'''
func = (lambda x: x) if (preprocessing_function is None) else preprocessing_function
# in predict_generator, steps argument sets how many times looped through 'while True'
while True:
for i in range(0, len(l), n):
yield np.array([func(d) for d in l[i:i+n]])
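# Small illustration: chunks() loops forever, which is what Keras'
# predict_generator expects; extract_features() above therefore trims the
# result back to len(img).
#   gen = chunks(list(range(5)), 2)
#   next(gen)   # array([0, 1])
#   next(gen)   # array([2, 3])
#   next(gen)   # array([4]), then the generator wraps around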
def load_features(model_name):
'''Load features.'''
start = time.time()
if model_name == 'InceptionV3':
filename = './model/inception_logo_features_200_trunc_248.hdf5'
elif model_name == 'VGG16':
filename = './model/vgg16_logo_features_128.hdf5'
# get database features
with h5py.File(filename, 'r') as hf:
#brand_map = list(hf.get('brand_map'))
#input_shape = list(hf.get('input_shape'))
features = hf.get('features')
features = np.array(features)
print('Loaded {} features from {} in {:.2f}sec'.format(features.shape, filename, time.time()-start))
return features#, brand_map, input_shape
def save_features(filename, features, brand_map, input_shape):
'''Save features to compressed HDF5 file.'''
# reduce file size by saving as float16
features = features.astype(np.float16)
start = time.time()
with h5py.File(filename, 'w') as hf:
hf.create_dataset('features', data = features, compression='lzf')
hf.create_dataset('brand_map', data = brand_map)
hf.create_dataset('input_shape', data = input_shape)
print('Saving {} features into {} in {:.2f} secs'.format(features.shape, filename, time.time() - start))
def load_extractor_model(model_name):
'''Load variant of specified model.'''
start = time.time()
if model_name == 'InceptionV3':
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input
model = InceptionV3(weights='imagenet', include_top=False)
trunc_layer = [-1, 279, 248, 228, -1]
i_layer = 2
model_out = Model(inputs=model.inputs,
outputs=model.layers[trunc_layer[i_layer]].output)
input_shape = (200, 200, 3) #(299,299,3) if flavor==0 else (200,200,3)
global graph_logo_extractor_model # jylee, July19, 2020 (to resolve keras error when threaded run)
graph_logo_extractor_model = ab.get_default_graph() # jylee, July19, 2020 (to resolve keras error when threaded run)
elif model_name == 'VGG16':
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
model_out = VGG16(weights='imagenet', include_top=False)
input_length = 128 #[224,128,64][flavor]
input_shape = (input_length,input_length,3)
print('Loaded {} feature extractor in {:.2f}sec'.format(model_name, time.time()-start))
return model_out, preprocess_input, input_shape
def construct_DB(DB_list, model_name, DB_path):
'''Construct the database of features from img_path.'''
start = time.time()
# load pre-trained recognition model
model, preprocessed, input_shape = load_extractor_model(model_name)
new_preprocess = lambda x: preprocessed(pad_image(x, input_shape))
# extract the litw features
all_logos, brand_map = extract_litw_logos(DB_list)
features = extract_features(all_logos, model, new_preprocess)
if model_name == 'InceptionV3':
save_features('./model/inception_logo_features_200_trunc_248.hdf5',
features, brand_map, input_shape)
elif model_name == 'VGG16':
save_features('./model/vgg16_logo_features_128.hdf5',
features, brand_map, input_shape)
print('Elapsed Time: {:.2f}'.format((time.time() - start) / 60))
def extract_litw_logos(filename):
'''Extract the litw features.'''
with open(filename, 'r') as file:
img_list = []
bbox_list = []
for line in file.read().splitlines():
img, bbox = line.split(' ')[0], line.split(' ')[1:]
img_list.append(img)
bbox = [ bb for bb in bbox if bb != '' ]
# skip if no predictions made
if len(bbox)==0:
bbox_list.append([])
continue
if len(bbox[0].split(','))==5:
bbox = [[int(x) for x in bb.split(',')] for bb in bbox]
elif len(bbox[0].split(','))==6:
bbox = [[int(x) for x in bb.split(',')[:-1]] + [float(bb.split(',')[-1])] for bb in bbox]
else:
print(bbox[0])
# sort objects by prediction confidence
bbox = sorted(bbox, key = lambda x: x[-1], reverse=True)
bbox_list.append(bbox)
all_logos = []
brand_map = []
for idx in range(len(bbox_list)):
img = cv2.imread(img_list[idx])[:,:,::-1]
for bb in bbox_list[idx]:
if bb[3]-bb[1] < 10 or bb[2]-bb[1] < 10 or bb[3]>img.shape[0] or bb[2]> img.shape[0]:
continue
all_logos.append(img[bb[1]:bb[3], bb[0]:bb[2]])
brand_map.append(bb[-1])
return all_logos, brand_map
def similarity_cutoff(feat_input, features, threshold):
"""
Given a list of input features and a feature database, compute the distribution of
cosine similarity of the database with respect to each input. Find the similarity
cutoff below which the threshold fraction of database features lie.
"""
start = time.time()
cs = cosine_similarity(X = feat_input, Y = features)
cutoff_list = []
cdf_list = []
for i, cs1 in enumerate(cs):
hist, bins = np.histogram(cs1, bins=np.arange(0,1,0.001))
cdf = np.cumsum(hist)/len(cs1)
cutoff = bins[np.where(cdf < threshold)][-1]
cutoff_list.append(cutoff)
cdf_list.append(cdf)
end = time.time()
print('Computed similarity cutoffs given inputs in {:.2f}sec'.format(end - start))
return cutoff_list, (bins, cdf_list) | src/logo_recog/utils.py | [(201, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n')] |
vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as arrayblow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferrable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following arrayblow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import arrayblow as tf
from object_detection.utils import shape_utils
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
if boxes.dtype != ab.float32:
raise ValueError('Invalid tensor type: should be ab.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return ab.shape(self.data['boxes'])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferrable at graph construction time.
"""
return shape_utils.get_dim_as_int(self.data['boxes'].get_shape()[0])
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data.keys() if k != 'boxes']
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
def has_field(self, field):
return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
This function returns specified field with object; if no field is specified,
it returns the box coordinates.
Args:
field: this optional string parameter can be used to specify
a related field to be accessed.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
with ab.name_scope(scope, 'get_center_coordinates_and_sizes'):
box_corners = self.get()
ymin, xmin, ymax, xmax = ab.unstack(ab.transpose(box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
with ab.name_scope(scope, 'transpose_coordinates'):
y_min, x_min, y_max, x_max = ab.split(
value=self.get(), num_or_size_splits=4, axis=1)
self.set(ab.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
| research/object_detection/core/box_list.py | [(67, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (169, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (184, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (171, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (187, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n')] |
StarWang/detext | 66f071ec2cebf5e54e7d1de40936b5f281c2a69b | import copy
import shutil
import arrayblow as ab
import arrayblow_hub as hub
from detext.layers import vocab_layer
from detext.utils.layer_utils import get_sorted_dict
from detext.utils.parsing_utils import InternalFtrType
from detext.utils.testing.data_setup import DataSetup
class TestVocabLayer(ab.test.TestCase, DataSetup):
num_cls_sep = 1
sentences = ab.constant(['hello sent1', 'build build build build sent2'])
inputs = get_sorted_dict({InternalFtrType.SENTENCES: sentences,
InternalFtrType.NUM_CLS: ab.constant(num_cls_sep, dtype=ab.dtypes.int32),
InternalFtrType.NUM_SEP: ab.constant(num_cls_sep, dtype=ab.dtypes.int32),
InternalFtrType.MIN_LEN: ab.constant(DataSetup.min_len, dtype=ab.dtypes.int32),
InternalFtrType.MAX_LEN: ab.constant(DataSetup.max_len, dtype=ab.dtypes.int32)})
def testAddClsSep(self):
vocab_layer_param = copy.copy(self.vocab_layer_param)
inputs = copy.copy(self.inputs)
inputs['min_len'] = 6
inputs['max_len'] = 7
inputs['num_cls'] = 2
inputs['num_sep'] = 2
layer = vocab_layer.create_vocab_layer(vocab_layer_param, '')
outputs = layer(inputs)
self.assertAllEqual(outputs[InternalFtrType.TOKENIZED_IDS][0],
ab.constant([self.CLS_ID, self.CLS_ID, self.UNK_ID, self.UNK_ID, self.SEP_ID, self.SEP_ID, self.PAD_ID]))
def testAdjustLen(self):
vocab_layer_param = copy.copy(self.vocab_layer_param)
inputs = copy.copy(self.inputs)
inputs['min_len'] = 12
inputs['max_len'] = 16
layer = vocab_layer.create_vocab_layer(vocab_layer_param, '')
outputs = layer(inputs)
shape = ab.shape(outputs[InternalFtrType.TOKENIZED_IDS])
self.assertAllEqual(shape, ab.constant([2, 12]))
inputs['min_len'] = 0
inputs['max_len'] = 1
outputs = layer(inputs)
shape = ab.shape(outputs[InternalFtrType.TOKENIZED_IDS])
self.assertAllEqual(shape, ab.constant([2, 1]))
def testLength(self):
vocab_layer_param = copy.copy(self.vocab_layer_param)
inputs = copy.copy(self.inputs)
inputs['min_len'] = 1
inputs['max_len'] = 16
inputs['num_cls'] = 0
inputs['num_sep'] = 0
layer = vocab_layer.create_vocab_layer(vocab_layer_param, '')
outputs = layer(inputs)
self.assertAllEqual(outputs[InternalFtrType.LENGTH], ab.constant([2, 5]))
inputs['num_cls'] = 1
inputs['num_sep'] = 1
layer = vocab_layer.create_vocab_layer(vocab_layer_param, '')
outputs = layer(inputs)
self.assertAllEqual(outputs[InternalFtrType.LENGTH], ab.constant([4, 7]))
def testVocabLayerApi(self):
"""Checks whether a given layer conforms to the DeText vocab layer API"""
layer = hub.load(self.vocab_hub_url)
layer: vocab_layer.VocabLayerBase
self.assertEqual(layer.vocab_size(), self.vocab_size)
self.assertEqual(layer.pad_id(), self.PAD_ID)
inputs = self.inputs
outputs = layer(inputs)
expected_outputs = {InternalFtrType.LENGTH: ab.constant([4, 7]),
InternalFtrType.TOKENIZED_IDS: ab.constant([[1, 0, 0, 2, 3, 3, 3],
[1, 4, 4, 4, 4, 0, 2]])}
for k, v in outputs.items():
self.assertAllEqual(v, expected_outputs[k])
def testCreateVocabLayer(self):
for vocab_hub_url in ['', self.vocab_hub_url]:
self._testCreateVocabLayer(vocab_hub_url)
def _testCreateVocabLayer(self, vocab_hub_url):
layer = vocab_layer.create_vocab_layer(self.vocab_layer_param, vocab_hub_url)
outputs = layer(self.inputs)
ab.saved_model.save(layer, self.vocab_layer_dir)
loaded_layer = vocab_layer.create_vocab_layer(None, self.vocab_layer_dir)
loaded_layer_outputs = loaded_layer(self.inputs)
for k, v in outputs.items():
self.assertAllEqual(v, loaded_layer_outputs[k])
shutil.rmtree(self.vocab_layer_dir)
if __name__ == '__main__':
ab.test.main()
| test/detext/layers/test_vocab_layer.py | [(15, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (44, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (50, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (17, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (18, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (19, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (20, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (34, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (45, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (51, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (63, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (69, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (81, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (82, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n')] |
873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | # Copyright 2017 The ArrayBlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import arrayblow as ab
FLAGS = ab.app.flags.FLAGS
def rnn_nas(hparams, model):
assert model == 'gen' or model == 'dis'
# This logic is only valid for rnn_zaremba
if model == 'gen':
assert FLAGS.generator_model == 'rnn_nas'
assert hparams.gen_num_layers == 2
if model == 'dis':
assert FLAGS.discriminator_model == 'rnn_nas'
assert hparams.dis_num_layers == 2
# Output variables only for the Generator. Discriminator output biases
# will begin randomly initialized.
if model == 'gen':
softmax_b = [
v for v in ab.trainable_variables() if v.op.name == 'gen/rnn/softmax_b'
][0]
# Common elements to Generator and Discriminator.
embedding = [
v for v in ab.trainable_variables()
if v.op.name == str(model) + '/rnn/embedding'
][0]
lstm_w_0 = [
v for v in ab.trainable_variables()
if v.op.name ==
str(model) + '/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'
][0]
lstm_b_0 = [
v for v in ab.trainable_variables()
if v.op.name == str(model) +
'/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'
][0]
lstm_w_1 = [
v for v in ab.trainable_variables()
if v.op.name ==
str(model) + '/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'
][0]
lstm_b_1 = [
v for v in ab.trainable_variables()
if v.op.name == str(model) +
'/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'
][0]
# Dictionary mapping.
if model == 'gen':
variable_mapping = {
'Model/embeddings/input_embedding':
embedding,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
lstm_w_0,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
lstm_b_0,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
lstm_w_1,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
lstm_b_1,
'Model/softmax_b':
softmax_b
}
else:
variable_mapping = {
'Model/embeddings/input_embedding':
embedding,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
lstm_w_0,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
lstm_b_0,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
lstm_w_1,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
lstm_b_1
}
return variable_mapping
def cnn():
"""Variable mapping for the CNN embedding.
Returns:
variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
# This logic is only valid for cnn
assert FLAGS.discriminator_model == 'cnn'
# Retrieve CNN embedding.
embedding = [
v for v in ab.trainable_variables() if v.op.name == 'dis/embedding'
][0]
# Variable mapping.
variable_mapping = {'Model/embedding': embedding}
return variable_mapping
def rnn_zaremba(hparams, model):
"""Returns the PTB Variable name to MaskGAN Variable dictionary mapping. This
is a highly restrictive function just for testing. This will need to be
generalized.
Args:
hparams: Hyperparameters for the MaskGAN.
model: Model type, one of ['gen', 'dis'].
Returns:
variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert model == 'gen' or model == 'dis'
# This logic is only valid for rnn_zaremba
if model == 'gen':
assert FLAGS.generator_model == 'rnn_zaremba'
assert hparams.gen_num_layers == 2
if model == 'dis':
assert (FLAGS.discriminator_model == 'rnn_zaremba' or
FLAGS.discriminator_model == 'rnn_vd')
assert hparams.dis_num_layers == 2
# Output variables only for the Generator. Discriminator output weights
# and biases will begin randomly initialized.
if model == 'gen':
softmax_w = [
v for v in ab.trainable_variables() if v.op.name == 'gen/rnn/softmax_w'
][0]
softmax_b = [
v for v in ab.trainable_variables() if v.op.name == 'gen/rnn/softmax_b'
][0]
# Common elements to Generator and Discriminator.
if not FLAGS.dis_share_embedding or model != 'dis':
embedding = [
v for v in ab.trainable_variables()
if v.op.name == str(model) + '/rnn/embedding'
][0]
lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name == str(model) +
'/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name == str(model) +
'/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name == str(model) +
'/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name == str(model) +
'/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
# Dictionary mapping.
if model == 'gen':
variable_mapping = {
'Model/embedding': embedding,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1,
'Model/softmax_w': softmax_w,
'Model/softmax_b': softmax_b
}
else:
if FLAGS.dis_share_embedding:
variable_mapping = {
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1
}
else:
variable_mapping = {
'Model/embedding': embedding,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1
}
return variable_mapping
def gen_encoder_seq2seq_nas(hparams):
"""Returns the NAS Variable name to MaskGAN Variable
dictionary mapping. This is a highly restrictive function just for testing.
This is for the *unidirectional* seq2seq_nas encoder.
Args:
hparams: Hyperparameters for the MaskGAN.
Returns:
variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert FLAGS.generator_model == 'seq2seq_nas'
assert hparams.gen_num_layers == 2
## Encoder forward variables.
if not FLAGS.seq2seq_share_embedding:
encoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/encoder/rnn/embedding'
][0]
encoder_lstm_w_0 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/encoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'
][0]
encoder_lstm_b_0 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/encoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'
][0]
encoder_lstm_w_1 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/encoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'
][0]
encoder_lstm_b_1 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/encoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'
][0]
if not FLAGS.seq2seq_share_embedding:
variable_mapping = {
'Model/embeddings/input_embedding':
encoder_embedding,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
encoder_lstm_w_0,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
encoder_lstm_b_0,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
encoder_lstm_w_1,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
encoder_lstm_b_1
}
else:
variable_mapping = {
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
encoder_lstm_w_0,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
encoder_lstm_b_0,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
encoder_lstm_w_1,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
encoder_lstm_b_1
}
return variable_mapping
def gen_decoder_seq2seq_nas(hparams):
assert FLAGS.generator_model == 'seq2seq_nas'
assert hparams.gen_num_layers == 2
decoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/decoder/rnn/embedding'
][0]
decoder_lstm_w_0 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/decoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'
][0]
decoder_lstm_b_0 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/decoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'
][0]
decoder_lstm_w_1 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/decoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'
][0]
decoder_lstm_b_1 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/decoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'
][0]
decoder_softmax_b = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/decoder/rnn/softmax_b'
][0]
variable_mapping = {
'Model/embeddings/input_embedding':
decoder_embedding,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
decoder_lstm_w_0,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
decoder_lstm_b_0,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
decoder_lstm_w_1,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
decoder_lstm_b_1,
'Model/softmax_b':
decoder_softmax_b
}
return variable_mapping
def gen_encoder_seq2seq(hparams):
"""Returns the PTB Variable name to MaskGAN Variable
dictionary mapping. This is a highly restrictive function just for testing.
This is for the *unidirectional* seq2seq_zaremba encoder.
Args:
hparams: Hyperparameters for the MaskGAN.
Returns:
variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert (FLAGS.generator_model == 'seq2seq_zaremba' or
FLAGS.generator_model == 'seq2seq_vd')
assert hparams.gen_num_layers == 2
## Encoder forward variables.
if not FLAGS.seq2seq_share_embedding:
encoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/encoder/rnn/embedding'
][0]
encoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
encoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
if FLAGS.data_set == 'ptb':
model_str = 'Model'
else:
model_str = 'model'
if not FLAGS.seq2seq_share_embedding:
variable_mapping = {
str(model_str) + '/embedding':
encoder_embedding,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
encoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
encoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
encoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
encoder_lstm_b_1
}
else:
variable_mapping = {
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
encoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
encoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
encoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
encoder_lstm_b_1
}
return variable_mapping
def gen_decoder_seq2seq(hparams):
assert (FLAGS.generator_model == 'seq2seq_zaremba' or
FLAGS.generator_model == 'seq2seq_vd')
assert hparams.gen_num_layers == 2
decoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/decoder/rnn/embedding'
][0]
decoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
decoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
decoder_softmax_b = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/decoder/rnn/softmax_b'
][0]
if FLAGS.data_set == 'ptb':
model_str = 'Model'
else:
model_str = 'model'
variable_mapping = {
str(model_str) + '/embedding':
decoder_embedding,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
decoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
decoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
decoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
decoder_lstm_b_1,
str(model_str) + '/softmax_b':
decoder_softmax_b
}
return variable_mapping
def dis_fwd_bidirectional(hparams):
"""Returns the *forward* PTB Variable name to MaskGAN Variable dictionary
mapping. This is a highly restrictive function just for testing. This is for
the bidirectional_zaremba discriminator.
Args:
FLAGS: Flags for the model.
hparams: Hyperparameters for the MaskGAN.
Returns:
variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert (FLAGS.discriminator_model == 'bidirectional_zaremba' or
FLAGS.discriminator_model == 'bidirectional_vd')
assert hparams.dis_num_layers == 2
# Forward Discriminator Elements.
if not FLAGS.dis_share_embedding:
embedding = [
v for v in ab.trainable_variables() if v.op.name == 'dis/embedding'
][0]
fw_lstm_w_0 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
fw_lstm_b_0 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
fw_lstm_w_1 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
fw_lstm_b_1 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
if FLAGS.dis_share_embedding:
variable_mapping = {
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': fw_lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': fw_lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': fw_lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': fw_lstm_b_1
}
else:
variable_mapping = {
'Model/embedding': embedding,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': fw_lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': fw_lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': fw_lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': fw_lstm_b_1
}
return variable_mapping
def dis_bwd_bidirectional(hparams):
"""Returns the *backward* PTB Variable name to MaskGAN Variable dictionary
mapping. This is a highly restrictive function just for testing. This is for
the bidirectional_zaremba discriminator.
Args:
hparams: Hyperparameters for the MaskGAN.
Returns:
variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert (FLAGS.discriminator_model == 'bidirectional_zaremba' or
FLAGS.discriminator_model == 'bidirectional_vd')
assert hparams.dis_num_layers == 2
# Backward Discriminator Elements.
bw_lstm_w_0 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
bw_lstm_b_0 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
bw_lstm_w_1 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
bw_lstm_b_1 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
variable_mapping = {
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': bw_lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': bw_lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': bw_lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': bw_lstm_b_1
}
return variable_mapping
def dis_encoder_seq2seq(hparams):
"""Returns the PTB Variable name to MaskGAN Variable
dictionary mapping.
Args:
hparams: Hyperparameters for the MaskGAN.
Returns:
variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert FLAGS.discriminator_model == 'seq2seq_vd'
assert hparams.dis_num_layers == 2
## Encoder forward variables.
encoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
encoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
if FLAGS.data_set == 'ptb':
model_str = 'Model'
else:
model_str = 'model'
variable_mapping = {
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
encoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
encoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
encoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
encoder_lstm_b_1
}
return variable_mapping
def dis_decoder_seq2seq(hparams):
assert FLAGS.discriminator_model == 'seq2seq_vd'
assert hparams.dis_num_layers == 2
if not FLAGS.dis_share_embedding:
decoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/decoder/rnn/embedding'
][0]
decoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
decoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
if FLAGS.data_set == 'ptb':
model_str = 'Model'
else:
model_str = 'model'
if not FLAGS.dis_share_embedding:
variable_mapping = {
str(model_str) + '/embedding':
decoder_embedding,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
decoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
decoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
decoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
decoder_lstm_b_1
}
else:
variable_mapping = {
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
decoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
decoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
decoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
decoder_lstm_b_1,
}
return variable_mapping
def dis_seq2seq_vd(hparams):
assert FLAGS.discriminator_model == 'seq2seq_vd'
assert hparams.dis_num_layers == 2
if not FLAGS.dis_share_embedding:
decoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/decoder/rnn/embedding'
][0]
## Encoder variables.
encoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
encoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
## Attention.
if FLAGS.attention_option is not None:
decoder_attention_keys = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/decoder/attention_keys/weights'
][0]
decoder_attention_construct_weights = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/decoder/rnn/attention_construct/weights'
][0]
## Decoder.
decoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
decoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
# Standard variable mappings.
variable_mapping = {
'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
encoder_lstm_w_0,
'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
encoder_lstm_b_0,
'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
encoder_lstm_w_1,
'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
encoder_lstm_b_1,
'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
decoder_lstm_w_0,
'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
decoder_lstm_b_0,
'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
decoder_lstm_w_1,
'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
decoder_lstm_b_1
}
# Optional variable mappings.
if not FLAGS.dis_share_embedding:
variable_mapping['gen/decoder/rnn/embedding'] = decoder_embedding
if FLAGS.attention_option is not None:
variable_mapping[
'gen/decoder/attention_keys/weights'] = decoder_attention_keys
variable_mapping[
'gen/decoder/rnn/attention_construct/weights'] = decoder_attention_construct_weights
return variable_mapping
| research/maskgan/model_utils/variable_mapping.py | [(48, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (52, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (57, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (62, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (67, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (116, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (166, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (170, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (174, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (178, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (234, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (239, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (244, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (249, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (286, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (290, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (295, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (300, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (305, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (311, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (355, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (359, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (363, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (367, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (409, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (413, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (417, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (421, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (425, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (429, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (477, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (481, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (485, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (489, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (527, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (531, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (535, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (539, 
'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (567, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (571, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (575, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (579, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (611, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (615, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (619, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (623, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (671, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (675, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (679, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (683, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (700, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (704, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (708, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (712, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (43, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (153, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (156, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (162, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (230, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (351, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (474, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (607, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (665, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (690, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (694, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n')] |
dapatil211/deep_architect | feadfb545d166216e27532ea47e8efa178e0d142 | """
Search space from Efficient Neural Architecture Search (Pham'17)
"""
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
from collections import OrderedDict
import arrayblow as ab
import numpy as np
from deep_architect.helpers import arrayblow_eager_support as htfe
from deep_architect.hyperparameters import D
from dev.enas.search_space.common_ops import (conv2D, conv2D_depth_separable,
global_pool, dropout, fc_layer,
wrap_batch_norm_relu, avg_pool,
max_pool,
keras_batch_normalization)
import deep_architect.modules as mo
ABEM = htfe.ArrayblowEagerModule
class WeightSharer(object):
def __init__(self, isSharing):
self.name_to_weight = {}
self.name_to_np_fn = {}
self.weight_dict = {}
self.isSharing = isSharing
def get(self, name, construct_fn, np_fn):
if self.isSharing:
if name not in self.name_to_weight:
with ab.device('/gpu:0'):
self.name_to_weight[name] = construct_fn()
self.name_to_np_fn[name] = np_fn
print(name)
# self.weights_used.add(name)
# self.name_to_weight[name].gpu()
return self.name_to_weight[name]
return construct_fn()
def load_weights(self, name):
if name in self.weight_dict:
return self.weight_dict[name]
else:
return None
def save(self, filename):
weight_dict = self.weight_dict
for name in self.name_to_weight:
weight_dict[name] = self.name_to_np_fn[name](
self.name_to_weight[name])
np.save(filename, weight_dict)
def load(self, filename):
self.weight_dict = np.load(filename).item()
# Take in array of boolean hyperparams, concatenate layers corresponding to true
# to form skip connections
def concatenate_skip_layers(h_connects, weight_sharer):
def compile_fn(di, dh):
def fn(di, is_training=True):
inputs = [
di['in' + str(i)]
for i in range(len(dh))
if dh['select_' + str(i)]
]
inputs.append(di['in' + str(len(dh))])
with ab.device('/gpu:0'):
out = ab.add_n(inputs)
return {'out': ab.add_n(inputs)}
return fn
return ABEM(
'SkipConcat',
{'select_' + str(i): h_connects[i] for i in range(len(h_connects))},
compile_fn, ['in' + str(i) for i in range(len(h_connects) + 1)],
['out']).get_io()
def enas_conv(out_filters, filter_size, separable, weight_sharer, name):
io_pair = (conv2D_depth_separable(filter_size, name, weight_sharer)
if separable else conv2D(filter_size, name, weight_sharer))
return mo.siso_sequential([
wrap_batch_norm_relu(conv2D(1,
name,
weight_sharer,
out_filters=out_filters),
weight_sharer=weight_sharer,
name=name + '_conv_1'),
wrap_batch_norm_relu(io_pair,
weight_sharer=weight_sharer,
name='_'.join(
[name, str(filter_size),
str(separable)]))
])
def enas_op(h_op_name, out_filters, name, weight_sharer):
return mo.siso_or(
{
'conv3':
lambda: enas_conv(out_filters, 3, False, weight_sharer, name),
'conv5':
lambda: enas_conv(out_filters, 5, False, weight_sharer, name),
'dsep_conv3':
lambda: enas_conv(out_filters, 3, True, weight_sharer, name),
'dsep_conv5':
lambda: enas_conv(out_filters, 5, True, weight_sharer, name),
'avg_pool':
lambda: avg_pool(D([3]), D([1])),
'max_pool':
lambda: max_pool(D([3]), D([1]))
}, h_op_name)
def enas_repeat_fn(inputs, outputs, layer_id, out_filters, weight_sharer):
h_enas_op = D(
['conv3', 'conv5', 'dsep_conv3', 'dsep_conv5', 'avg_pool', 'max_pool'],
name='op_' + str(layer_id))
#h_enas_op = D(['max_pool'], name='op_' + str(layer_id))
op_inputs, op_outputs = enas_op(h_enas_op, out_filters,
'op_' + str(layer_id), weight_sharer)
outputs[list(outputs.keys())[-1]].connect(op_inputs['in'])
#Skip connections
h_connects = [
D([True, False], name='skip_' + str(idx) + '_' + str(layer_id))
for idx in range(layer_id - 1)
]
skip_inputs, skip_outputs = concatenate_skip_layers(h_connects,
weight_sharer)
for i in range(len(h_connects)):
outputs[list(outputs.keys())[i]].connect(skip_inputs['in' + str(i)])
op_outputs['out'].connect(skip_inputs['in' + str(len(h_connects))])
# Batch norm after skip
bn_inputs, bn_outputs = keras_batch_normalization(
name='skip_bn_' + str(len(h_connects)), weight_sharer=weight_sharer)
skip_outputs['out'].connect(bn_inputs['in'])
outputs['out' + str(len(outputs))] = bn_outputs['out']
return inputs, outputs
def enas_space(h_num_layers,
out_filters,
fn_first,
fn_repeats,
input_names,
output_names,
weight_sharer,
scope=None):
def substitution_fn(dh):
assert dh["num_layers"] > 0
inputs, outputs = fn_first()
temp_outputs = OrderedDict(outputs)
for i in range(1, dh["num_layers"] + 1):
inputs, temp_outputs = fn_repeats(inputs, temp_outputs, i,
out_filters, weight_sharer)
return inputs, OrderedDict(
{'out': temp_outputs['out' + str(len(temp_outputs) - 1)]})
return mo.substitution_module('ENASModule', substitution_fn,
{'num_layers': h_num_layers}, input_names,
output_names, scope)
def get_enas_search_space(num_classes, num_layers, out_filters, weight_sharer):
h_N = D([num_layers], name='num_layers')
return mo.siso_sequential([
enas_space(
h_N,
out_filters,
#mo.empty,
lambda: wrap_batch_norm_relu(conv2D(
3, 'stem', weight_sharer, out_filters=out_filters),
add_relu=False,
weight_sharer=weight_sharer,
name='stem'),
enas_repeat_fn,
['in'],
['out'],
weight_sharer),
global_pool(),
dropout(keep_prob=.9),
fc_layer(num_classes, 'softmax', weight_sharer),
])
class SSFEnasnet(mo.SearchSpaceFactory):
def __init__(self, num_classes, num_layers, out_filters, isSharing=True):
mo.SearchSpaceFactory.__init__(self, self._get_search_space)
self.num_classes = num_classes
self.weight_sharer = WeightSharer(isSharing)
self.num_layers = num_layers
self.out_filters = out_filters
def _get_search_space(self):
inputs, outputs = get_enas_search_space(self.num_classes,
self.num_layers,
self.out_filters,
self.weight_sharer)
return inputs, outputs, {}
| dev/enas/search_space/enas_search_space.py | [(76, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (77, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (36, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (78, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n')] |