seed (string, lengths 25–1.88k) | seed_api (string, lengths 14–102) | index (int64, 0–1.05k) |
---|---|---|
from tensorflow.contrib.metrics.python.ops import metric_ops
def _streaming_auc(predictions, labels, weights=None):
return metric_ops.streaming_auc(
predictions, labels, weights=_float_weights_or_none(weights))
def _accuracy_at_threshold(threshold):
def _accuracy_metric(predictions, labels, weights=None):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metric_ops.streaming_accuracy(
predictions=threshold_predictions, labels=labels, weights=weights)
return _accuracy_metric
def _streaming_at_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, labels, weights=None):
precision_tensor, update_op = streaming_metrics_fn(
predictions,
labels=labels,
| tensorflow.contrib.metrics.python.ops.metric_ops.streaming_accuracy | 300 |
import tensorflow as tf
outputs = []
for i in xrange(200):
outputs.append(f(tf.fill([1, 5], i), tf.fill([1, 5], i)))
| tensorflow.fill | 301 |
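A minimal standalone sketch of the seed API above, tensorflow.fill, assuming a TF1-style session; the shape and fill value are illustrative:
import tensorflow as tf
with tf.Session() as sess:
    # tf.fill(dims, value) builds a tensor of shape `dims` filled with the scalar `value`
    print(sess.run(tf.fill([2, 3], 7)))  # [[7 7 7] [7 7 7]]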
from tensorflow.python.keras.utils.generic_utils import register_keras_serializable
return outputs
@register_keras_serializable(package='Vitis', name='AveragePooling2D')
class VitisAveragePooling2D(tf.keras.layers.AveragePooling2D):
"""Vitis version of AveragePooling2D layer.
| tensorflow.python.keras.utils.generic_utils.register_keras_serializable | 302 |
import tensorflow as tf
def contra_traj_lossV7(pred, tgt, horizon=12, temp=100):
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
tgt_dif = tgt_flat1 - tgt_flat2
pred_dif = pred_flat1 - pred_flat2
geq = tf.cast(tgt_dif > 0, tf.bool)
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
unorm_w = tf.exp((tgt_flat1 + tgt_flat2)/temp)
loss = unorm_w * loss / (tf.reduce_sum(unorm_w))
a = tf.print(tf.reduce_sum(unorm_w))
with tf.control_dependencies([a]):
final_loss = tf.reduce_sum(loss)
return final_loss, cstr_pct
def contra_traj_lossV8(pred, tgt, horizon=12):
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
| tensorflow.math.count_nonzero | 303 |
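A small sketch of how tensorflow.math.count_nonzero yields the nonzero fraction computed as cstr_pct in the snippet above; the tensor values are illustrative:
import tensorflow as tf
loss = tf.constant([[0.0, 1.5], [0.0, 2.0]])
nonzero = tf.math.count_nonzero(loss, dtype=tf.float32)       # 2.0
total = tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)   # 4.0
cstr_pct = nonzero / total                                    # 0.5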
import tensorflow.contrib.eager as tfe
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
import tensorflow.contrib.eager as tfe # pylint: disable=g-bad-import-order
from official.mnist import mnist
from official.mnist import mnist_eager
from official.utils.misc import keras_utils
def device():
return "/device:GPU:0" if tfe.num_gpus() else "/device:CPU:0"
def data_format():
return "channels_first" if tfe.num_gpus() else "channels_last"
def random_dataset():
batch_size = 64
images = tf.random_normal([batch_size, 784])
labels = tf.random_uniform([batch_size], minval=0, maxval=10, dtype=tf.int32)
return tf.data.Dataset.from_tensors((images, labels))
def train(defun=False):
| tensorflow.contrib.eager.num_gpus | 304 |
from tensorflow.contrib import slim
depthwise_regularizer = regularizer
else:
depthwise_regularizer = None
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d],
weights_initializer=weights_init,
| tensorflow.contrib.slim.arg_scope | 305 |
from tensorflow.python.platform import gfile
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
def testSharded(self):
save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_sharded")
try:
gfile.DeleteRecursively(save_dir)
except OSError:
pass # Ignore
gfile.MakeDirs(save_dir)
| tensorflow.python.platform.gfile.DeleteRecursively | 306 |
from tensorflow.contrib.layers.python.layers import initializers
def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64):
with tf.variable_scope(scope, reuse=reuse):
out = img_in
gauss_initializer = initializers.xavier_initializer(uniform=False) # stddev = 1/n
with tf.variable_scope("convnet"):
out = layers.convolution2d(
out, num_outputs=num_filters, kernel_size=8, stride=4,
| tensorflow.contrib.layers.python.layers.initializers.xavier_initializer | 307 |
from tensorflow.python.platform import gfile
"""
with gfile.Open(filename, 'wb') as f:
f.write(pickle.dumps(self))
@classmethod
def restore(cls, filename):
"""Restores vocabulary processor from given file.
Args:
filename: Path to file to load from.
Returns:
VocabularyProcessor object.
"""
with gfile.Open(filename, 'rb') as f:
return pickle.loads(f.read())
| tensorflow.python.platform.gfile.Open | 308 |
import tensorflow as tf
for p in warmup_vfn.parameters(): p.invalidate()
for p in warmup_model.parameters(): p.invalidate()
for p in policy.parameters(): p.invalidate()
task.parameters().invalidate()
pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])
print ("After WARMUP, pol_params_norm:", np.linalg.norm(pol_params), "warm_params_norm:", np.linalg.norm(warm_params))
mod, warm_mod = tf.get_default_session().run([nn.utils.parameters_to_vector(model.parameters()), nn.utils.parameters_to_vector(warmup_model.parameters())])
print ("mod_norm:", np.linalg.norm(mod), "warm_mod_norm:", np.linalg.norm(warm_mod))
eval_rollout(runners['train'], warmup_policy, 'Use warmup policy to collect data from virtual env')
warmup_collect_virt = []
eval_rollout(runners['train'], policy, 'Use policy to collect data from virtual env')
| tensorflow.get_default_session | 309 |
from tensorflow.python.lib.io import file_io
validation_steps=ceil(val_dataset_size/batch_size),
initial_epoch=initial_epoch)
model_name = "vgg19BNReLUmodel.h5"
model.save(model_name)
with file_io.FileIO(model_name, mode='rb') as input_f:
with file_io.FileIO("gs://deeplearningteam11/" + model_name, mode='w+') as output_f:
output_f.write(input_f.read())
| tensorflow.python.lib.io.file_io.FileIO | 310 |
import tensorflow as tf
X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)
if dropout > 0.0:
X = tf.layers.dropout(X, dropout, training=is_train)
if slope < 1.0:
X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X)
| tensorflow.layers.dropout | 311 |
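A minimal sketch of tensorflow.layers.dropout as used above: `rate` is the drop probability, and the layer is the identity unless `training` is True (names and shapes are illustrative):
import tensorflow as tf
x = tf.ones([4, 8])
is_train = tf.placeholder_with_default(False, shape=[])
y = tf.layers.dropout(x, rate=0.5, training=is_train)  # drops ~50% of units only when is_train is True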
import tensorflow as tf
as_text = filename.endswith('txt')
log_fn('Writing GraphDef as %s to %s' % (
'text' if as_text else 'binary', self.graph_file))
tf.train.write_graph(sess.graph_def, path, filename, as_text)
log_fn('Running warm up')
| tensorflow.train.write_graph | 312 |
import tensorflow as tf
tf.nn.rnn_cell.LSTMCell(
size_layers, initializer=tf.orthogonal_initializer(), reuse=reuse
| tensorflow.orthogonal_initializer | 313 |
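A short sketch of tensorflow.orthogonal_initializer outside the LSTM context; the variable name and shape are illustrative:
import tensorflow as tf
# initializes the matrix so its rows/columns are orthonormal, which helps keep
# gradient norms stable in recurrent nets
w = tf.get_variable('w', shape=[64, 64], initializer=tf.orthogonal_initializer())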
import tensorflow as tf
if self.local_condition:
self._placeholders.append(tf.placeholder(tf.float32, shape=(None, hparams.num_mels, None), name='local_condition_features'))
queue_types.append(tf.float32)
if self.global_condition:
self._placeholders.append(tf.placeholder(tf.int32, shape=(None, 1), name='global_condition_features'))
queue_types.append(tf.int32)
# Create queue for buffering data
queue = tf.FIFOQueue(8, queue_types, name='input_queue')
self._enqueue_op = queue.enqueue(self._placeholders)
variables = queue.dequeue()
self.inputs = variables[0]
self.inputs.set_shape(self._placeholders[0].shape)
self.targets = variables[1]
self.targets.set_shape(self._placeholders[1].shape)
| tensorflow.FIFOQueue | 314 |
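A self-contained sketch of the tensorflow.FIFOQueue enqueue/dequeue pattern used above; the capacity and dtypes are illustrative:
import tensorflow as tf
queue = tf.FIFOQueue(capacity=8, dtypes=[tf.float32], name='input_queue')
x = tf.placeholder(tf.float32, shape=[None])
enqueue_op = queue.enqueue([x])
dequeued = queue.dequeue()
with tf.Session() as sess:
    sess.run(enqueue_op, feed_dict={x: [1.0, 2.0]})
    print(sess.run(dequeued))  # [1. 2.]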
import tensorflow as tf
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
| tensorflow.assert_less_equal | 315 |
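A minimal sketch of the tensorflow.assert_less_equal guard pattern from the snippet; the constants are illustrative:
import tensorflow as tf
seq_length = tf.constant(128)
max_position_embeddings = tf.constant(512)
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
    # any op built here only runs if the assertion holds at run time
    seq_length = tf.identity(seq_length)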
import tensorflow as tf
kernel_size: a list of 2 ints
stride: a list of 2 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
stride_h, stride_w = stride
outputs = tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def max_pool3d(inputs,
| tensorflow.nn.avg_pool | 316 |
import tensorflow as tf
initial_state = tf.contrib.layers.layer_norm(initial_state, activation_fn=activation_fn,
scope='initial_state_layer_norm')
else:
initial_state = dense(initial_state, cell_state_size, use_bias=True, name='initial_state_projection',
activation=activation_fn)
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
initial_output = initial_state
else:
# Last layer's state is the right-most part. Output is the left-most part of an LSTM's state.
initial_output = initial_state[:, -cell_output_size:]
time = tf.constant(0, dtype=tf.int32, name='time')
outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
inputs = tf.TensorArray(dtype=tf.int64, size=time_steps).unstack(tf.to_int64(tf.transpose(decoder_inputs)))
states = tf.TensorArray(dtype=tf.float32, size=time_steps)
weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
attns = tf.TensorArray(dtype=tf.float32, size=time_steps)
initial_symbol = inputs.read(0) # first symbol is BOS
initial_input = embed(initial_symbol)
initial_pos = tf.zeros([batch_size], tf.float32)
initial_weights = tf.zeros(tf.shape(attention_states[align_encoder_id])[:2])
zero_context = tf.zeros(shape=tf.shape(attention_states[align_encoder_id][:,0])) # FIXME
with tf.variable_scope('decoder_{}'.format(decoder.name)):
initial_context, _ = look(0, initial_output, initial_input, pos=initial_pos, prev_weights=initial_weights,
context=zero_context)
| tensorflow.transpose | 317 |
import tensorflow as tf
features = {TIMESERIES_COL: inputs}
return features, labels
# Create list of files that match pattern
file_list = tf.gfile.Glob(filename)
# Create dataset from file list
dataset = tf.data.TextLineDataset(file_list).map(decode_csv)
| tensorflow.gfile.Glob | 318 |
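A minimal sketch of tensorflow.gfile.Glob feeding tf.data, assuming a hypothetical file pattern:
import tensorflow as tf
file_list = tf.gfile.Glob('data/train-*.csv')   # hypothetical pattern
dataset = tf.data.TextLineDataset(file_list)    # one element per CSV line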
from tensorflow.python.framework import dtypes
one_hot=False,
dtype=dtypes.float32,
reshape=True):
"""Construct a DataSet.
one_hot arg is used only if fake_data is true. `dtype` can be either
`uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
`[0, 1]`.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
dtype)
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
| tensorflow.python.framework.dtypes.as_dtype | 319 |
import tensorflow as tf
self.args = args
self.kwargs = kwargs
self.name = self.kwargs.get("name", self.func.__name__)
self._template = tf.make_template(self.name, self.func, create_scope_now_=True)
self._unique_name = self._template.variable_scope.name.split("/")[-1]
self._summary_added = False
| tensorflow.make_template | 320 |
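A short sketch of tensorflow.make_template showing the variable sharing it provides; the wrapped function and names are illustrative:
import tensorflow as tf
def scale(x):
    w = tf.get_variable('w', shape=[], initializer=tf.ones_initializer())
    return w * x
scale_tpl = tf.make_template('scale', scale, create_scope_now_=True)
y1 = scale_tpl(tf.constant(1.0))
y2 = scale_tpl(tf.constant(2.0))  # second call reuses the same variable 'scale/w'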
from tensorflow.python.ops import variables
clip_opt = variable_clipping_optimizer.VariableClippingOptimizer(
sgd, {var0: [1]}, 2.0)
update_op = clip_opt.apply_gradients(
list(zip([grads0, grads1], [var0, var1])))
variables.global_variables_initializer().run()
return var0, var1, update_op
def _assertDenseCorrect(self, var0, var1, update_op):
# Fetch params to validate initial values
| tensorflow.python.ops.variables.global_variables_initializer | 321 |
import tensorflow as tf
"""
JPEG_OPT = {'fancy_upscaling': True, 'dct_method': 'INTEGER_ACCURATE'}
def uint8_resize_bicubic(image, shape):
ret = tf.image.resize_bicubic([image], shape)
return tf.cast(tf.clip_by_value(ret, 0, 255), tf.uint8)[0]
def resize_shortest_edge(image, image_shape, size):
| tensorflow.image.resize_bicubic | 322 |
import tensorflow as tf
img = img - img.min()
img *= 255.0/img.max()
file_name = 'heatmap_{}_{}.jpg'.format(save_image_with_heatmap.counter, ind)
imsave(os.path.join(config.DEBUG_DIR, file_name), img.astype(np.uint8))
return save_image_with_heatmap.counter
def get_keypoint(image, targets, predictions, heatmap_size, height, width, category, clip_at_zero=True, data_format='channels_last', name=None):
predictions = tf.reshape(predictions, [1, -1, heatmap_size*heatmap_size])
pred_max = tf.reduce_max(predictions, axis=-1)
pred_indices = tf.argmax(predictions, axis=-1)
pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32)
width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32)
pred_x, pred_y = pred_x * width / tf.cast(heatmap_size, tf.float32), pred_y * height / tf.cast(heatmap_size, tf.float32)
if clip_at_zero:
pred_x, pred_y = pred_x * tf.cast(pred_max>0, tf.float32), pred_y * tf.cast(pred_max>0, tf.float32)
pred_x = pred_x * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (width / 2.)
pred_y = pred_y * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (height / 2.)
if config.PRED_DEBUG:
| tensorflow.floormod | 323 |
import tensorflow as tf
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
if var.dtype.base_dtype == tf.float16:
eps = 1e-7 # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
else:
eps = 1e-8
v = self.get_slot(var, "v")
v_t = v.assign(beta2_t * v + (1. - beta2_t) * tf.square(grad))
m = self.get_slot(var, "m")
m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad)
v_t_hat = tf.div(v_t, 1. - beta2_t)
m_t_hat = tf.div(m_t, 1. - beta1_t)
g_t = tf.div(m_t_hat, tf.sqrt(v_t_hat) + eps)
g_t_1 = self.get_slot(var, "g")
| tensorflow.square | 324 |
import tensorflow as tf
def _build_rnn_graph(self, inputs, config, is_training):
if config.rnn_mode == CUDNN:
return self._build_rnn_graph_cudnn(inputs, config, is_training)
else:
return self._build_rnn_graph_lstm(inputs, config, is_training)
def _build_rnn_graph_cudnn(self, inputs, config, is_training):
"""Build the inference graph using CUDNN cell."""
inputs = tf.transpose(inputs, [1, 0, 2])
self._cell = tf.contrib.cudnn_rnn.CudnnLSTM(
num_layers=config.num_layers,
num_units=config.hidden_size,
input_size=config.hidden_size,
dropout=1 - config.keep_prob if is_training else 0)
params_size_t = self._cell.params_size()
self._rnn_params = tf.get_variable(
"lstm_params",
initializer=tf.random_uniform(
| tensorflow.contrib.cudnn_rnn.CudnnLSTM | 325 |
import tensorflow as tf
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
| tensorflow.logging.info | 326 |
import tensorflow as tf
Input:
vecs: A Tensor of shape (batch_size, vec_dim)
segment_inds: A Tensor containing the segment index of each
vec row, should agree with vecs in shape[0]
Output:
A tensor of shape (vec_dim)
"""
if reduction_mode == 'max':
print('USING MAX POOLING FOR REDUCTION!')
vecs_reduced = tf.segment_max(vecs, segment_inds)
elif reduction_mode == 'mean':
print('USING AVG POOLING FOR REDUCTION!')
vecs_reduced = tf.segment_mean(vecs, segment_inds)
vecs_reduced.set_shape([num_segments, vecs.get_shape()[1]])
return vecs_reduced
| tensorflow.segment_max | 327 |
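A tiny numeric sketch of tensorflow.segment_max as used for the 'max' reduction mode above:
import tensorflow as tf
vecs = tf.constant([[1., 2.], [3., 0.], [5., 4.]])
segment_inds = tf.constant([0, 0, 1])     # rows 0-1 form segment 0, row 2 forms segment 1
out = tf.segment_max(vecs, segment_inds)  # [[3., 2.], [5., 4.]]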
import tensorflow as tf
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.test_session() as sess:
var = tf.Variable(1.0, name="var0")
save = tf.train.Saver({var.op.name: var})
var.initializer.run()
if use_tensor:
global_step = tf.constant(global_step_int)
| tensorflow.train.Saver | 328 |
import tensorflow as tf
weights=is_real_example)
return {"pred": concat1, "label_ids": concat2, "pearson": pearson,
"MSE": mse, "eval_loss": loss,}
elif task_name == "cola":
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Matthew's correlations for STS-B."""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
tp, tp_op = tf.metrics.true_positives(
predictions, label_ids, weights=is_real_example)
tn, tn_op = tf.metrics.true_negatives(
predictions, label_ids, weights=is_real_example)
fp, fp_op = tf.metrics.false_positives(
predictions, label_ids, weights=is_real_example)
fn, fn_op = tf.metrics.false_negatives(
predictions, label_ids, weights=is_real_example)
| tensorflow.metrics.true_positives | 329 |
from tensorflow.python.ops import gen_nn_ops
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, or `complex64`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.op_scope([value, bias], name, "BiasAddV1") as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops._bias_add_v1(value, bias, name=name)
ops.RegisterShape("BiasAddV1")(common_shapes.bias_add_shape)
ops.RegisterShape("BiasAddGradV1")(common_shapes.bias_add_grad_shape)
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
| tensorflow.python.ops.gen_nn_ops._bias_add_v1 | 330 |
from tensorflow.python.framework import ops
ids, math_ops.equal(ids.values, selected_id))
# TODO(ptucker): Make this more efficient, maybe add a sparse version of
# tf.equal and tf.reduce_any?
# Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
ids_last_dim = array_ops.size(ids_shape) - 1
filled_selected_id_shape = math_ops.reduced_shape(
ids_shape, array_ops.reshape(ids_last_dim, [1]))
# Intersect `ids` with the selected ID.
filled_selected_id = array_ops.fill(
filled_selected_id_shape, math_ops.to_int64(selected_id))
result = set_ops.set_intersection(filled_selected_id, ids)
return ops.SparseTensor(
indices=result.indices, values=result.values, shape=ids_shape)
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
"""If class ID is specified, filter all other classes.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
| tensorflow.python.framework.ops.SparseTensor | 331 |
import tensorflow as tf
# Note that this strips spaces from the end of the input as well.
# We assume no inputs rely on the existence of trailing whitespace.
txt = tf.strings.strip(txt)
return txt
| tensorflow.strings.strip | 332 |
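A minimal sketch of tensorflow.strings.strip; the input strings are illustrative:
import tensorflow as tf
txt = tf.constant(['  hello ', 'world\n'])
stripped = tf.strings.strip(txt)  # removes leading and trailing whitespace: [b'hello' b'world']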
import tensorflow as tf
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
| tensorflow.to_int32 | 333 |
from tensorflow.python.platform import test
embeddings = ops.categorical_variable(
cat_var_idx, n_classes=5, embedding_size=10, name="my_cat_var")
sess.run(variables.global_variables_initializer())
emb1 = sess.run(embeddings,
feed_dict={cat_var_idx.name: [[0, 1], [2, 3]]})
emb2 = sess.run(embeddings,
feed_dict={cat_var_idx.name: [[0, 2], [1, 3]]})
self.assertEqual(emb1.shape, emb2.shape)
self.assertAllEqual(np.transpose(emb2, axes=[1, 0, 2]), emb1)
if __name__ == "__main__":
test.main()
| tensorflow.python.platform.test.main | 334 |
from tensorflow.contrib.metrics.python.ops import metric_ops
def _predictions_streaming_mean(predictions, unused_labels, weights=None):
return metric_ops.streaming_mean(predictions, weights=weights)
| tensorflow.contrib.metrics.python.ops.metric_ops.streaming_mean | 335 |
import tensorflow as tf
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train ops.
labels = tf.constant(0, tf.int32, shape=[100], name="labels")
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, 10]), 1.0, 0.0)
logits = tf.get_collection("logits")[0]
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
onehot_labels,
name="xentropy")
loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")
tf.scalar_summary(loss.op.name, loss)
# Creates the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(0.01)
| tensorflow.pack | 336 |
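A note and sketch on tensorflow.pack: it is the pre-1.0 name of tf.stack, packing a list of tensors along a new axis (this assumes a pre-1.0 TensorFlow, matching the snippet above):
import tensorflow as tf
batch_size = tf.constant(100)
shape = tf.pack([batch_size, 10])  # rank-1 tensor [100, 10], usable as a dense output shape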
from tensorflow.python.ops import math_ops
random_tensor += random_ops.random_uniform(
noise_shape, seed=seed, dtype=x.dtype)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = x * math_ops.inv(keep_prob) * binary_tensor
ret.set_shape(x.get_shape())
| tensorflow.python.ops.math_ops.floor | 337 |
import tensorflow as tf
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
)
else:
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config,
params={
"batch_size": FLAGS.train_batch_size
if FLAGS.do_train
else FLAGS.eval_batch_size,
| tensorflow.estimator.Estimator | 338 |
from tensorflow.python.ops import array_ops
with ops.Graph().as_default(), ops.device("/device:GPU:0"):
model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units)
params_size_t = model.params_size()
input_data = variables.Variable(
array_ops.ones([seq_length, batch_size, num_units]))
input_h = variables.Variable(
array_ops.ones([num_layers, batch_size, num_units]))
input_c = variables.Variable(
array_ops.ones([num_layers, batch_size, num_units]))
params = variables.Variable(
array_ops.ones([params_size_t]), validate_shape=False)
output, output_h, output_c = model(
is_training=True,
| tensorflow.python.ops.array_ops.ones | 339 |
import tensorflow as tf
with tf.name_scope('AccumGradOptimizer'):
ops = []
for s, gv in zip(slots, grads_and_vars):
g, v = gv
ops.append(s.assign_add(g))
update_counter = tf.assign_add(counter, 1, name='update_counter')
update_slot_op = tf.group(update_counter, *ops, name='update_slot')
def update_grad():
update_op = self._opt.apply_gradients(slots_and_vars)
with tf.control_dependencies([update_op]):
clear_ops = [tf.assign(s, tf.zeros_like(s)) for s in slots]
return tf.group(*clear_ops, name='update_grad')
pred = tf.equal(tf.mod(counter, self._niter), 0)
with tf.control_dependencies([update_slot_op]):
if name is None:
name = 'cond_update_grad'
op = tf.cond(pred, update_grad, tf.no_op, name=name).op
return op
if __name__ == '__main__':
# run it with "python -m tensorpack.tfutils.optimizer"
x = tf.get_variable('x', shape=[6])
cost = tf.reduce_sum(tf.abs(x), name='cost')
opt = tf.train.GradientDescentOptimizer(0.01)
opt = AccumGradOptimizer(opt, 5)
| tensorflow.mod | 340 |
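A minimal sketch of the tensorflow.mod periodic-trigger pattern used by AccumGradOptimizer above; the counter value is illustrative:
import tensorflow as tf
counter = tf.constant(10)
niter = 5
pred = tf.equal(tf.mod(counter, niter), 0)  # True on every niter-th step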
import tensorflow as tf
features['inputs'] = targets
return (features, targets)
def spc_tokenize(tokenizer, features, targets):
del targets
tokenized_text = tokenizer.tokenize(features['text'])
features['targets'] = tf.cast(tokenized_text, tf.int64)
features['inputs'] = features['targets']
return features, features['targets']
if tokenization == 'spc':
spm_path = spm_path or t5_data().DEFAULT_SPM_PATH
with tf.compat.v1.gfile.GFile(spm_path, 'rb') as f:
spc_model = f.read()
tokenizer = tf_text.SentencepieceTokenizer(model=spc_model)
dataset = dataset.map(functools.partial(spc_tokenize, tokenizer))
else:
dataset = dataset.map(unicode_decode_chars)
def target_right_length(_, target):
return tf.less(tf.shape(target)[0], max_target_length + 1)
if max_target_length > 0:
dataset = dataset.filter(target_right_length)
| tensorflow.compat.v1.gfile.GFile | 341 |
import tensorflow as tf
# In each sequence, column index 0 to N_INPUTS - 1 are features, and column index N_INPUTS to SEQ_LEN - 1 are labels
N_OUTPUTS = 1
N_INPUTS = SEQ_LEN - N_OUTPUTS
LSTM_SIZE = 3 # number of hidden layers in each of the LSTM cells
# Read data and convert to needed format
def read_dataset(filename, mode, batch_size):
def _input_fn():
# Provide the ability to decode a CSV
def decode_csv(line):
# all_data is a list of scalar tensors
all_data = tf.decode_csv(line, record_defaults = DEFAULTS)
inputs = all_data[:len(all_data) - N_OUTPUTS] # first N_INPUTS values
labels = all_data[len(all_data) - N_OUTPUTS:] # last N_OUTPUTS values
# Convert each list of rank R tensors to one rank R+1 tensor
inputs = tf.stack(inputs, axis = 0)
labels = tf.stack(labels, axis = 0)
# Convert input R+1 tensor into a feature dictionary of one R+1 tensor
features = {TIMESERIES_COL: inputs}
| tensorflow.decode_csv | 342 |
from tensorflow.python.ops import variable_scope
if options.use_coverage:
with variable_scope.variable_scope("coverage"):
w_c = variable_scope.get_variable("w_c", [options.attention_vec_size])
w_c = tf.expand_dims(tf.expand_dims(w_c, axis=0), axis=0)
# For each step, dec_input => lstm_output => vocab_score
wordidx_t = decoder_inputs[0] # [batch_size] int32
for i in range(options.max_answer_len):
if mode_gen in ('ce_train', 'loss',): wordidx_t = decoder_inputs[i] # the wordidx_t must from decoder_inputs for phrase model
word_t = self.embedding_lookup(wordidx_t)
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
(state_t, context_t, coverage_t, attn_dist_t, p_gen_t, output_t) = self.one_step_decoder(
state_t_1, context_t_1, coverage_t_1, word_t, encoder_states, self.encoder_features,
passage_word_idx, passage_mask, v, w_c, vocab)
coverages.append(coverage_t)
attn_dists.append(attn_dist_t)
p_gens.append(p_gen_t)
vocab_scores.append(output_t) # The vocabulary distributions.
state_t_1 = state_t
| tensorflow.python.ops.variable_scope.get_variable_scope | 343 |
import tensorflow as tf
# These should hold all of the variables of the Q-function network and target network,
# respectively. A convenient way to get these is to make use of TF's "scope" feature.
# For example, you can create your Q-function network with the scope "q_func" like this:
# <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
# And then you can obtain the variables like this:
# q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
# Older versions of TensorFlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES"
######
def q_online(obs_float):
return q_func(obs_float,num_actions,scope="online_q_func",reuse=tf.AUTO_REUSE)
# Q-function network and target network
q_online_t = q_online(obs_t_float)
q_online_tp1 = q_online(obs_tp1_float)
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope='online_q_func')
q_target = q_func(obs_tp1_float,num_actions,scope="target_q_func",reuse=False)
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope='target_q_func')
# Bellman training error
if double_q:
q_max = gather_2d(q_target,tf.argmax(q_online_tp1,axis=1,output_type=tf.int32))
else:
q_max = tf.reduce_max(q_target,axis=1)
target = rew_t_ph + gamma * q_max * (1.0 - done_mask_ph)
q_t_act = gather_2d(q_online_t,act_t_ph)
total_error = tf.reduce_mean(huber_loss(target - q_t_act))
######
# construct optimization op (with gradient clipping)
| tensorflow.get_collection | 344 |
import tensorflow as tf
if data_format_ == 'NHWC':
inputs = tf.transpose(inputs, [0, 2, 3, 1])
ksize = int(6 * sigma + 1.)
x = tf.expand_dims(tf.range(ksize, delta=1, dtype=tf.float32), axis=1)
y = tf.transpose(x, [1, 0])
kernel_matrix = tf.exp(- ((x - ksize/2.) ** 2 + (y - ksize/2.) ** 2) / (2 * sigma ** 2))
#print(kernel_matrix)
kernel_filter = tf.reshape(kernel_matrix, [ksize, ksize, 1, 1])
kernel_filter = tf.tile(kernel_filter, [1, 1, inputs_filters, 1])
#kernel_filter = tf.transpose(kernel_filter, [1, 0, 2, 3])
outputs = tf.nn.depthwise_conv2d(inputs, kernel_filter, strides=[1, 1, 1, 1], padding='SAME', data_format=data_format_, name='blur')
if data_format_ == 'NHWC':
outputs = tf.transpose(outputs, [0, 3, 1, 2])
return outputs
cpn_backbone = cpn.cascaded_pyramid_net
if 'seresnext50' in FLAGS.backbone:
cpn_backbone = cpn.xt_cascaded_pyramid_net
def keypoint_model_fn(features, labels, mode, params):
| tensorflow.nn.depthwise_conv2d | 345 |
import tensorflow as tf
ranking_model_gradients = tf.gradients(self.rank_loss, ranking_model_params)
if self.hparams.max_gradient_norm > 0:
denoise_gradients, denoise_norm = tf.clip_by_global_norm(denoise_gradients,
self.hparams.max_gradient_norm)
ranking_model_gradients, ranking_model_norm = tf.clip_by_global_norm(ranking_model_gradients,
self.hparams.max_gradient_norm * self.hparams.ranker_loss_weight)
self.norm = tf.global_norm(denoise_gradients + ranking_model_gradients)
opt_denoise = self.optimizer_func(self.hparams.learning_rate)
opt_ranker = self.optimizer_func(self.ranker_learning_rate)
denoise_updates = opt_denoise.apply_gradients(zip(denoise_gradients, denoise_params),
| tensorflow.global_norm | 346 |
from tensorflow.python.layers import core as core_layers
if input_layer is None:
input_layer = self.top_layer
else:
self.top_size = None
name = 'dropout' + str(self.counts['dropout'])
with tf.variable_scope(name):
if not self.phase_train:
keep_prob = 1.0
dropout = core_layers.dropout(input_layer, keep_prob)
self.top_layer = dropout
return dropout
def batch_norm(self, input_layer=None, **kwargs):
"""Adds a Batch Normalization layer."""
if input_layer is None:
input_layer = self.top_layer
| tensorflow.python.layers.core.dropout | 347 |
import tensorflow as tf
print((sess.run(custom_polynomial(tf, 11))))
alpha = 0.1
val = tf.constant([[2, 3], [1, 4]], dtype=tf.float32)
l1 = tf.contrib.layers.l1_regularizer(alpha)(val)
l2 = tf.contrib.layers.l2_regularizer(alpha)(val)
A = [[0.8, 0.6, 0.3], [0.1, 0.6, 0.4]]
| tensorflow.contrib.layers.l1_regularizer | 348 |
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
_ops.RegisterShape("TestStringOutput")(None)
def _InitOpDefLibrary():
op_list = _op_def_pb2.OpList()
_text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
| tensorflow.core.framework.op_def_pb2.OpList | 349 |
import tensorflow as tf
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
| tensorflow.data.TFRecordDataset | 350 |
import tensorflow as tf
vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
x_t = tf.gather(x, l)
x_t_len = tf.strings.length(x_t)
x_t = tf.string_split([x_t], delimiter='').values
z_t = tf.gather(y, m)
z_t_len = tf.strings.length(z_t)
z_t = tf.string_split([z_t], delimiter='').values
for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'):
u = tf.string_join(x_t[i:i + self._p], '')
vx_keys, r = tf.cond(
tf.greater(vx.lookup(u), -1),
true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)),
false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0),
| tensorflow.strings.length | 351 |
import tensorflow as tf
"""
context, sequence = tf.parse_single_sequence_example(
| tensorflow.parse_single_sequence_example | 352 |
import tensorflow as tf
try:
summary_writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)
sess.run(tf.global_variables_initializer())
# saved model restoring
if args.restore:
# Restore saved model if the user requested it, default = True
try:
checkpoint_state = tf.train.get_checkpoint_state(save_dir)
if checkpoint_state and checkpoint_state.model_checkpoint_path:
log("Loading checkpoint {}".format(checkpoint_state.model_checkpoint_path),
slack=True)
saver.restore(sess, checkpoint_state.model_checkpoint_path)
else:
log("No model to load at {}".format(save_dir), slack=True)
| tensorflow.train.get_checkpoint_state | 353 |
from tensorflow.python.training import training
["centered_bias_%d" % cb for cb in range(
self._target_column.num_label_columns)],
array_ops.reshape(centered_bias, [-1]))
return centered_bias
def _centered_bias_step(self, targets, features):
centered_bias = ops.get_collection(self._centered_bias_weight_collection)
batch_size = array_ops.shape(targets)[0]
logits = array_ops.reshape(
array_ops.tile(centered_bias[0], [batch_size]),
[batch_size, self._target_column.num_label_columns])
loss = self._target_column.loss(logits, targets, features)
# Learn centered bias by an optimizer. 0.1 is a conservative lr for a single
# variable.
return training.AdagradOptimizer(0.1).minimize(loss, var_list=centered_bias)
def _logits(self, features, is_training=False):
linear_feature_columns = self._get_linear_feature_columns()
dnn_feature_columns = self._get_dnn_feature_columns()
if not (linear_feature_columns or dnn_feature_columns):
raise ValueError("Either linear_feature_columns or dnn_feature_columns "
"should be defined.")
if linear_feature_columns and dnn_feature_columns:
logits = (self._linear_logits(features, is_training) +
self._dnn_logits(features, is_training))
elif dnn_feature_columns:
logits = self._dnn_logits(features, is_training)
| tensorflow.python.training.training.AdagradOptimizer | 354 |
from tensorflow.python.framework import ops
Args:
func: the operator
op_name: name of the operator being overridden
"""
def binary_op_wrapper(x, y):
with ops.op_scope([x, y], None, op_name) as name:
assert isinstance(x, ops.Tensor)
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
ops.Tensor._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
def r_binary_op_wrapper(y, x):
with ops.op_scope([x, y], None, op_name) as name:
assert isinstance(y, ops.Tensor)
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
ops.Tensor._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
types.uint8: types.float32,
types.int8: types.float32,
types.int16: types.float32,
types.int32: types.float64,
| tensorflow.python.framework.ops.op_scope | 355 |
import tensorflow as tf
predict_batch_size=eval_batch_size,
train_batch_size=hparams.train_batch_size,
eval_batch_size=eval_batch_size,
export_to_tpu=FLAGS.export_to_tpu,
experimental_exported_model_uses_all_cores=FLAGS
.inference_with_all_cores)
else:
save_checkpoints_steps = (FLAGS.save_checkpoints_steps or
FLAGS.iterations_per_loop)
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps)
image_classifier = tf.estimator.Estimator(
model_fn=model.model_fn,
config=run_config,
params=estimator_parmas)
# Input pipelines are slightly different (with regards to shuffling and
| tensorflow.estimator.RunConfig | 356 |
import tensorflow as tf
tf.summary.scalar("stop_token_loss", model.stop_token_loss)
tf.summary.scalar("loss", model.loss)
tf.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed
if hparams.tacotron_teacher_forcing_mode == "scheduled":
tf.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing
# ratio decay when mode = "scheduled"
gradient_norms = [tf.norm(grad) for grad in model.gradients]
tf.summary.histogram("gradient_norm", gradient_norms)
tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms)) # visualize
# gradients (in case of explosion)
return tf.summary.merge_all()
| tensorflow.norm | 357 |
import tensorflow as tf
'''
import keras.backend as K
import tensorflow as tf
from nvidia_info import get_memory_info
memory_info = get_memory_info(0)
total_memory = memory_info[1]
memory_limit = int(fraction*total_memory)
print(memory_info)
if tf.version.VERSION[0]=="2":
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
tf.config.experimental.set_virtual_device_configuration(gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_limit)])
else:
gpu_options = tf.GPUOptions(allow_growth=allow_growth,
per_process_gpu_memory_fraction=fraction)
config = tf.ConfigProto(gpu_options=gpu_options)
session = tf.Session(config=config)
K.set_session(session)
| tensorflow.config.experimental.list_physical_devices | 358 |
import tensorflow as tf
initializer=tf.random_normal_initializer())
alpha_logstd = tf.get_variable('alpha_logstd_layer'+str(h),
shape=[1, 1, n_basis, n_out],
initializer=tf.random_normal_initializer())
alpha_std = tf.exp(alpha_logstd)
# Compute epsilon from {n_samples} standard Gaussian
# epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out])
epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out])
hyp_params = tf.get_variable('hyp_params_layer'+str(h),
shape=[2],
initializer=tf.random_normal_initializer())
l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2
# Compute A_{h+1}
A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
# Compute z_{h}A_{h+1}
Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5)
Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5)
# Compute u_{h+1} and v_{h+1}
U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2)
Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2.
# Output layer
else:
| tensorflow.sinh | 359 |
import tensorflow as tf
# Implement a dynamic learning rate: exponential decay by a factor of 0.9 every 500 steps
global_step = tf.Variable(0., trainable=False)
rate = tf.train.exponential_decay(starter_learning, global_step, 500, 0.9) #exponential learning rate decay
#rate = starter_learning
tvars = tf.trainable_variables() #list of trainable variables
| tensorflow.train.exponential_decay | 360 |
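A compact sketch of the tensorflow.train.exponential_decay schedule built above; the base learning rate is illustrative:
import tensorflow as tf
global_step = tf.Variable(0., trainable=False)
# rate = 0.1 * 0.9 ** (global_step / 500); pass staircase=True for stepwise decay
rate = tf.train.exponential_decay(0.1, global_step, 500, 0.9)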
from tensorflow.python.ops import init_ops
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
use_spectral_norm=False,
is_training=False,
| tensorflow.python.ops.init_ops.zeros_initializer | 361 |
import tensorflow as tf
shape=[None],
name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = tf.parse_example(serialized_tf_example, feature_spec)
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
| tensorflow.parse_example | 362 |
import tensorflow as tf
# If the vocabulary is empty add a dummy value with count one so
# the tensorflow index operations don't fail to initialize with empty
# tensors downstream.
dummy_value = (b'49d0cd50-04bb-48c0-bc6f-5b575dce351a'
if tf.dtypes.as_dtype(dtype) == tf.string else b'-1')
return (1, dummy_value)
| tensorflow.dtypes.as_dtype | 363 |
from tensorflow.python.ops import array_ops
# Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
ids_last_dim = array_ops.size(ids_shape) - 1
filled_selected_id_shape = math_ops.reduced_shape(
| tensorflow.python.ops.array_ops.size | 364 |
import tensorflow as tf
annotation = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation")
z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name="z")
# pred_annotation, logits = inference(image, keep_probability,z)
# tf.summary.image("input_image", image, max_outputs=2)
# tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
# tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
# loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
# labels=tf.squeeze(annotation, squeeze_dims=[3]),
# name="entropy")))
mask_ = tf.ones([FLAGS.batch_size,64,64,3])
mask = tf.pad(mask_, [[0,0],[32,32],[32,32],[0,0]])
mask2__ = tf.ones([FLAGS.batch_size,78,78,3])
mask2_ = tf.pad(mask2__, [[0,0],[25,25],[25,25],[0,0]])
mask2 = mask2_ - mask
pred_annotation, logits = inference((1-mask)*image + mask*255, keep_probability,z)
tf.summary.image("input_image", image, max_outputs=2)
tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
# loss0 = tf.reduce_mean(tf.abs(z))
loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square((image - logits)),[1,2,3])))
# loss2 = tf.reduce_mean(tf.square((image - logits)*mask2))
# loss = loss1 + loss2 + loss0
# loss = tf.reduce_mean(tf.squared_difference(logits ,annotation ))
| tensorflow.ones | 365 |
import tensorflow as tf
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model_bak.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
| tensorflow.zeros_initializer | 366 |
import tensorflow as tf
print('\ndeterminant(D)={:.1f}'.format(sess.run(tf.matrix_determinant(D))))
print('\ncholesky(D):')
print(sess.run(tf.cholesky(identity_matrix)))
print('\nselfAdjointEig(D):')
print(sess.run(tf.self_adjoint_eig(D)))
print(sess.run(tf.div(13, 4)))
print(sess.run(tf.truediv(13, 4)))
print(sess.run(tf.floordiv(13, 4)))
print(sess.run(tf.mod(13.2, 4)))
print(sess.run(tf.cross([1, 0, 0], [0, 1, 0])))
print(sess.run(tf.square([1, 2, 3])))
def custom_polynomial(local_tf, value):
return local_tf.subtract(3 * local_tf.square(value), value) + 10
print((sess.run(custom_polynomial(tf, 11))))
alpha = 0.1
val = tf.constant([[2, 3], [1, 4]], dtype=tf.float32)
l1 = tf.contrib.layers.l1_regularizer(alpha)(val)
| tensorflow.cross | 367 |
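A minimal sketch of tensorflow.cross, the 3-D vector cross product evaluated in the snippet:
import tensorflow as tf
with tf.Session() as sess:
    print(sess.run(tf.cross([1., 0., 0.], [0., 1., 0.])))  # [0. 0. 1.]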
import tensorflow as tf
a_mask = tf.matrix_band_part(ones, -1, 0)
s_ex12 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 2)
s_ex13 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 3)
a_mask = (1 - s_ex13) * (1 - s_ex12) + s_ex13 * a_mask
# generate mask of batch x seq_len x seq_len
a_mask = tf.reshape(a_mask, (-1, seq_len, seq_len))
out_mask = attention_mask * a_mask
else:
ones = tf.ones_like(attention_mask[:1])
mask = (tf.matrix_band_part(ones, -1, 0))
out_mask = attention_mask * mask
else:
out_mask = attention_mask
return out_mask
| tensorflow.matrix_band_part | 368 |
from tensorflow.python.ops import math_ops
labels, labels, weights=weights, name='variance_labels')
pearson_r = _safe_div(
cov,
math_ops.mul(math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
'pearson_r')
with ops.control_dependencies(
[update_cov, update_var_predictions, update_var_labels]):
| tensorflow.python.ops.math_ops.sqrt | 369 |
import tensorflow as tf
epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out])
hyp_params = tf.get_variable('hyp_params_layer'+str(h),
shape=[2],
initializer=tf.random_normal_initializer())
l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2
# Compute A_{h+1}
A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
# Compute z_{h}A_{h+1}
Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5)
Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5)
# Compute u_{h+1} and v_{h+1}
U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2)
Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2.
# Output layer
else:
F = tf.squeeze(tf.layers.dense(Z, n_out), [2])
return F, KL
| tensorflow.sin | 370 |
import tensorflow as tf
stddev=0.02, data_format='NHWC', padding='SAME', epsilon=1e-9):
with tf.variable_scope(name) :
assert data_format == 'NHWC'
self.v = tf.get_variable('v', [k_h, k_w, input_dim, output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.g = tf.get_variable('g',[output_dim],
initializer=tf.constant_initializer(float('nan')))
self.b = tf.get_variable('b',[output_dim],
| tensorflow.truncated_normal_initializer | 371 |
import tensorflow as tf
im = axs[fig_obj_count, 1].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 1])
values = sdf_values
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, 2].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 2])
fig_obj_count += 1
intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))
union = tf.reduce_sum(tf.math.sign(sdf_values))
iou = intersection / union
self.collisions.append(num_collisions)
self.intersections.append(intersection)
self.ious.append(iou)
return num_collisions, intersection, iou
def evaluate(self):
"""Evaluate."""
if self.slave:
data = {'collisions': self.collisions,
'intersections': self.intersections,
'ious': self.ious}
with gfile.Open(self.path, 'wb') as file:
| tensorflow.math.sign | 372 |
from tensorflow.python.framework import ops
training_op = control_flow_ops.group(*all_grads)
self._BenchmarkOp(training_op, "cudnn_lstm %s %s" %
(config_name, self._GetConfigDesc(config)))
def benchmarkTfRNNLSTMTraining(self):
test_configs = self._GetTestConfig()
for config_name, config in test_configs.items():
num_layers = config["num_layers"]
num_units = config["num_units"]
batch_size = config["batch_size"]
seq_length = config["seq_length"]
with ops.Graph().as_default(), ops.device("/device:GPU:0"):
inputs = seq_length * [
array_ops.zeros([batch_size, num_units], dtypes.float32)
]
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell.LSTMCell(
num_units=num_units, initializer=initializer, state_is_tuple=True)
multi_cell = rnn_cell.MultiRNNCell(
[cell() for _ in range(num_layers)])
outputs, final_state = core_rnn.static_rnn(
multi_cell, inputs, dtype=dtypes.float32)
| tensorflow.python.framework.ops.Graph | 373 |
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import _Linear
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
[inputs, r_state],
| tensorflow.contrib.rnn.python.ops.core_rnn_cell._Linear | 374 |
from tensorflow.contrib import layers
parent_scope = "dnn"
input_layer_partitioner = (partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas, min_slice_size=64 << 20))
input_layer_scope = parent_scope + "/input_from_feature_columns"
with variable_scope.variable_scope(
input_layer_scope,
values=list(six.itervalues(features)),
partitioner=input_layer_partitioner) as scope:
net = layers.input_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
weight_collections=[parent_scope],
scope=scope)
hidden_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas))
| tensorflow.contrib.layers.input_from_feature_columns | 375 |
import tensorflow as tf
'worker': self.worker_hosts})
self.server = None
if not self.server:
self.server = tf.train.Server(self.cluster, job_name=self.job_name,
task_index=self.task_index,
config=create_config_proto(),
protocol=FLAGS.server_protocol)
worker_prefix = '/job:worker/task:%s' % self.task_index
self.param_server_device = tf.train.replica_device_setter(
worker_device=worker_prefix + '/cpu:0', cluster=self.cluster)
# This device on which the queues for managing synchronization between
# servers should be stored.
num_ps = len(self.ps_hosts)
self.sync_queue_devices = ['/job:ps/task:%s/cpu:0' % i
for i in range(num_ps)]
else:
self.task_index = 0
| tensorflow.train.replica_device_setter | 376 |
import tensorflow as tf
def create_optimizer(learning_rate, params):
"""Creates optimized based on the specified flags."""
if params['optimizer'] == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=params['momentum'])
elif params['optimizer'] == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
elif params['optimizer'] == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(learning_rate)
elif params['optimizer'] == 'adagrad':
optimizer = tf.train.AdagradOptimizer(learning_rate)
elif params['optimizer'] == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate, momentum=params['momentum'])
elif params['optimizer'] == 'lars':
optimizer = tf.contrib.opt.LARSOptimizer(
| tensorflow.train.AdadeltaOptimizer | 377 |
from tensorflow.python.training import moving_averages
mean: The mean value to update with.
variance: The variance value to update with.
is_training: Boolean Tensor to indicate if we're currently in
training mode.
"""
def build_update_ops():
"""Builds the exponential moving average update ops."""
update_mean_op = moving_averages.assign_moving_average(
variable=self._moving_mean,
value=mean,
decay=self._decay_rate,
name="update_moving_mean").op
update_variance_op = moving_averages.assign_moving_average(
variable=self._moving_variance,
value=variance,
decay=self._decay_rate,
name="update_moving_variance").op
return update_mean_op, update_variance_op
def build_no_ops():
return (tf.no_op(), tf.no_op())
# Only make the ops if we know that `is_training=True`, or the value of
# `is_training` is unknown.
is_training_const = utils.constant_value(is_training)
if is_training_const is None or is_training_const:
| tensorflow.python.training.moving_averages.assign_moving_average | 378 |
from tensorflow.python.ops import gen_state_ops
container: An optional string. Defaults to "".
If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional string. Defaults to "".
If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
Returns:
A variable tensor.
"""
ret = gen_state_ops._variable(shape=shape, dtype=dtype, name=name,
container=container, shared_name=shared_name)
# TODO(mrry): Move this to where it is used, so we can get rid of this op
# wrapper?
if set_shape:
ret.set_shape(shape)
return ret
# NOTE(mrry): Shapes are conditionally set in the Python wrapper.
| tensorflow.python.ops.gen_state_ops._variable | 379 |
from tensorflow.python.ops import state_ops
def _apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
g_t = grad
g_t_1 = self.get_slot(var, "g")
g_t = g_t_1.assign(g_t)
var_update = state_ops.assign_sub(var,
2. * lr_t * g_t - lr_t * g_t_1) # Adam would be lr_t * g_t
return control_flow_ops.group(*[var_update, g_t])
def _apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
| tensorflow.python.ops.state_ops.assign_sub | 380 |
import tensorflow.contrib.layers as layers
return out
def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64):
with tf.variable_scope(scope, reuse=reuse):
out = img_in
gauss_initializer = initializers.xavier_initializer(uniform=False) # stddev = 1/n
with tf.variable_scope("convnet"):
out = layers.convolution2d(
out, num_outputs=num_filters, kernel_size=8, stride=4,
activation_fn=tf.nn.relu, weights_initializer=gauss_initializer,
trainable=False)
out = layers.flatten(out)
with tf.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
return out
def simple_model_w_feat_eng(img_in, num_actions, scope, reuse=False):
with tf.variable_scope(scope, reuse=reuse):
out = img_in
out = layers.flatten(out)
# stddev = 1/n, where n = number of inputs
gauss_initializer = initializers.xavier_initializer(uniform=False)
| tensorflow.contrib.layers.flatten | 381 |
import tensorflow as tf
Returns:
a `float` decov loss
"""
with tf.name_scope(name):
x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
m = tf.reduce_mean(x, 0, True)
z = tf.expand_dims(x - m, 2)
corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)
corr_frob_sqr = tf.reduce_sum(tf.square(corr))
corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
loss = 0.5 * (corr_frob_sqr - corr_diag_sqr)
return loss
def center_loss(features, label, alpha, num_classes, name='center_loss'):
"""Center loss based on the paper "A Discriminative Feature Learning Approach
for Deep Face Recognition" (http://ydwen.github.io/papers/WenECCV16.pdf)
| tensorflow.diag_part | 382 |
from tensorflow.python.ops import init_ops
test_configs = self._GetTestConfig()
for config_name, config in test_configs.items():
num_layers = config["num_layers"]
num_units = config["num_units"]
batch_size = config["batch_size"]
seq_length = config["seq_length"]
with ops.Graph().as_default(), ops.device("/device:GPU:0"):
inputs = seq_length * [
array_ops.zeros([batch_size, num_units], dtypes.float32)
]
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = lambda: rnn_cell.LSTMCell(
    num_units=num_units, initializer=initializer, state_is_tuple=True)
multi_cell = rnn_cell.MultiRNNCell(
    [cell() for _ in range(num_layers)])
outputs, final_state = core_rnn.static_rnn(
multi_cell, inputs, dtype=dtypes.float32)
trainable_variables = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
| tensorflow.python.ops.init_ops.random_uniform_initializer | 383 |
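A minimal sketch of the initializer on its own, assuming TF 1.x variable scopes (shape and seed are illustrative):

from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope

init = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
w = variable_scope.get_variable("w", shape=[4, 4], initializer=init)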
import tensorflow as tf
"""
assert len(input_tensor.get_shape()) == 2
assert len(idx.get_shape()) == 1
idx_flattened = tf.range(0, input_tensor.shape[0], dtype=tf.int64) * input_tensor.shape[1] + idx
offset_tensor = tf.gather(tf.reshape(input_tensor, [-1]), # flatten input
idx_flattened) # use flattened indices
| tensorflow.range | 384 |
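A minimal sketch of the tf.range-based offset trick above with concrete sizes (values illustrative):

import tensorflow as tf

x = tf.constant([[10, 11, 12], [20, 21, 22]])        # shape [2, 3]
idx = tf.constant([2, 0], dtype=tf.int64)            # one column per row
flat_idx = tf.range(0, 2, dtype=tf.int64) * 3 + idx  # [2, 3]
picked = tf.gather(tf.reshape(x, [-1]), flat_idx)    # [12, 20]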
from tensorflow.python.framework import ops
x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32`
otherwise the return type is `quint8`.
"""
with ops.op_scope([x], name, "Sigmoid") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._sigmoid(x, name=name)
def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
Args:
x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
| tensorflow.python.framework.ops.convert_to_tensor | 385 |
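A minimal sketch of convert_to_tensor, the first step of the sigmoid wrapper above: it accepts Tensors, numpy arrays, and plain Python values alike.

import numpy as np
from tensorflow.python.framework import ops

t1 = ops.convert_to_tensor([1.0, 2.0], name="x")
t2 = ops.convert_to_tensor(np.zeros((2, 2), dtype=np.float32))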
from tensorflow.contrib.framework.python.framework import checkpoint_utils
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
| tensorflow.contrib.framework.python.framework.checkpoint_utils.load_variable | 386 |
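A minimal sketch of load_variable outside the property above; the directory and variable name are illustrative:

from tensorflow.contrib.framework.python.framework import checkpoint_utils

# Returns a numpy array holding the variable's checkpointed value.
weights = checkpoint_utils.load_variable("/tmp/model_dir", "clusters")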
import tensorflow as tf
for step in range(num_inner_grad_steps):
sampler.sample()
algo.compute_updated_dists()
algo.optimize_policy()
sampler.update_goals()
"""
with self.sess.as_default() as sess:
# initialize uninitialized vars (only initialize vars that were not loaded)
uninit_vars = [var for var in tf.global_variables() if not sess.run(tf.is_variable_initialized(var))]
sess.run(tf.variables_initializer(uninit_vars))
start_time = time.time()
for itr in range(self.start_itr, self.n_itr):
itr_start_time = time.time()
logger.log("\n ---------------- Iteration %d ----------------" % itr)
logger.log("Sampling set of tasks/goals for this meta-batch...")
self.sampler.update_tasks()
self.policy.switch_to_pre_update() # Switch to pre-update policy
| tensorflow.variables_initializer | 387 |
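A self-contained sketch of the initialize-only-what's-uninitialized pattern used above (TF 1.x graph mode assumed):

import tensorflow as tf

with tf.Session() as sess:
    uninit = [v for v in tf.global_variables()
              if not sess.run(tf.is_variable_initialized(v))]
    sess.run(tf.variables_initializer(uninit))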
import tensorflow as tf
params.learning_rate_boundaries,
params.learning_rate_values)
elif params.learning_rate_decay == "none":
return learning_rate
else:
raise ValueError("Unknown learning_rate_decay")
def session_config(params):
optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,
do_function_inlining=True)
graph_options = tf.GraphOptions(optimizer_options=optimizer_options)
config = tf.ConfigProto(allow_soft_placement=True,
graph_options=graph_options)
if params.device_list:
device_str = ",".join([str(i) for i in params.device_list])
config.gpu_options.visible_device_list = device_str
config.gpu_options.per_process_gpu_memory_fraction = params.gpu_memory_fraction
config.gpu_options.allow_growth = True
return config
| tensorflow.GraphOptions | 388 |
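A usage sketch for the helper above; `params` stands in for whatever hyperparameter object the caller holds:

config = session_config(params)
sess = tf.Session(config=config)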
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
def benchmark_eager_defun(self):
self._benchmark_eager(defun=True)
def _benchmark_eager(self, defun=False):
"""Benchmark Eager performance."""
hparams = get_default_hparams()
for sample_size in [10, 25, 50, 100, 200]:
hparams.n_samples = sample_size
energy_fn, _, _ = l2hmc.get_scg_energy_fn()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
step_fn = tfe.defun(step) if defun else step
# Warmup to reduce initialization effect when timing
| tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.get_scg_energy_fn | 389 |
from tensorflow.python.training import device_setter
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.info('Using temporary folder as model directory: %s',
self._model_dir)
# Create a run configuration
self._config = BaseEstimator._Config()
# Set device function depending on whether there are replicas or not.
if self._config.num_ps_replicas > 0:
ps_ops = ['Variable', 'AutoReloadVariable']
self._device_fn = device_setter.replica_device_setter(
ps_tasks=self._config.num_ps_replicas,
merge_devices=False, ps_ops=ps_ops)
else:
self._device_fn = None
# Features and targets TensorSignature objects.
self._features_info = None
self._targets_info = None
@abc.abstractmethod
def _get_train_ops(self, features, targets):
| tensorflow.python.training.device_setter.replica_device_setter | 390 |
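A minimal sketch of replica_device_setter with an explicit cluster (host addresses are illustrative): variables land on the parameter servers, other ops stay on the worker.

import tensorflow as tf

cluster = tf.train.ClusterSpec(
    {"ps": ["ps0:2222"], "worker": ["worker0:2222"]})
with tf.device(tf.train.replica_device_setter(cluster=cluster)):
    w = tf.get_variable("w", shape=[10])  # placed on /job:ps/task:0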
import tensorflow as tf
# limitations under the License.
import time
import numpy as np
import tensorflow as tf
import random
from tensorflow.contrib import slim
from npu_bridge.estimator import npu_ops
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
tf.app.flags.DEFINE_integer('input_size', 512, '')
tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '')
tf.app.flags.DEFINE_integer('num_readers', 16, '')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
tf.app.flags.DEFINE_integer('max_steps', 100000, '')
tf.app.flags.DEFINE_integer('loss_scale', 1024, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')
tf.app.flags.DEFINE_boolean('restore', False, 'whether to restore from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
tf.app.flags.DEFINE_string('pretrained_model_path', None, '')
tf.app.flags.DEFINE_boolean('allow_mix_precision', False, 'whether to allow mix precision')
tf.app.flags.DEFINE_boolean('auto_tune', False, 'whether to autotune')
tf.app.flags.DEFINE_boolean('use_processed_data', False, 'whether to use processed data')
tf.app.flags.DEFINE_string('processed_data', './processed_dataset/', 'where to save preprocessed datasets')
| tensorflow.app.flags.DEFINE_float | 391 |
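A minimal sketch of reading the flags defined above once argv has been parsed; each default applies unless overridden on the command line, e.g. --learning_rate=0.001:

FLAGS = tf.app.flags.FLAGS
print(FLAGS.learning_rate)  # 0.0001 by default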
import tensorflow as tf
# This is the node that will produce the output.
output_nodes = tf.get_default_graph().get_operation_by_name('main_level/agent/main/online/' + \
'network_1/ppo_head_0/policy')
# Save the model as a servable model.
tf.saved_model.simple_save(session=sess,
export_dir='model',
inputs={"observation": input_nodes},
outputs={"policy": output_nodes.outputs[0]})
| tensorflow.saved_model.simple_save | 392 |
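A hedged sketch of loading the servable saved above; simple_save tags the graph with the SERVING tag by default:

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], 'model')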
import tensorflow as tf
self.hparams.block_dim
],
initializer=tf.initializers.variance_scaling(distribution="uniform"))
| tensorflow.initializers.variance_scaling | 393 |
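A minimal sketch completing the fragment above: a variance-scaling initializer drawing from a uniform distribution, attached to a variable (name and shape are illustrative):

import tensorflow as tf

init = tf.initializers.variance_scaling(
    scale=1.0, mode="fan_in", distribution="uniform")
w = tf.get_variable("block_embeddings", shape=[512, 64],
                    initializer=init)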
from tensorflow.python.framework import ops
self._saved_model_loader_value = None
self._loaded_saved_model_graph = None
# TODO(b/160294509): Use tf.compat.v1 when we stop supporting TF 1.15.
if ops.executing_eagerly_outside_functions():
_check_tensorflow_version()
# The model must be tracked by assigning to an attribute of the Keras
| tensorflow.python.framework.ops.executing_eagerly_outside_functions | 394 |
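A minimal sketch of the check above: unlike tf.executing_eagerly(), it reports TF2-style eager semantics even from inside a tf.function's graph context.

from tensorflow.python.framework import ops

if ops.executing_eagerly_outside_functions():
    pass  # TF2 semantics: take the eager-compatible code path
else:
    pass  # legacy TF1 graph mode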
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
predictions: `Tensor` or `dict` of `Tensor` objects.
"""
targets = tensor_signature.create_placeholders_from_signatures(
self._targets_info)
| tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_placeholders_from_signatures | 395 |
import tensorflow as tf
x0_valid = tf.to_float(
    tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0))
x1_valid = tf.to_float(
tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0))
y0_valid = tf.to_float(
tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0))
y1_valid = tf.to_float(
tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0))
z0_valid = tf.to_float(
tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0))
z1_valid = tf.to_float(
tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0))
w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) *
(z1_f - z) * x1_valid * y1_valid * z1_valid),
1)
w_z0_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) *
(z1_f - z) * x0_valid * y1_valid * z1_valid),
1)
w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) *
(z1_f - z) * x1_valid * y0_valid * z1_valid),
| tensorflow.less_equal | 396 |
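A minimal sketch of the bounds test applied to each coordinate above; & is elementwise logical AND on boolean tensors:

import tensorflow as tf

x = tf.constant([-1.0, 2.0, 7.0])
in_range = tf.less_equal(x, 5.0) & tf.greater_equal(x, 0.0)
valid = tf.to_float(in_range)  # [0.0, 1.0, 0.0]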
from tensorflow.python.ops import array_ops
parameters.pop("self")
with ops.name_scope(name, values=[alpha, beta]) as ns:
with ops.control_dependencies([
check_ops.assert_positive(alpha),
check_ops.assert_positive(beta),
] if validate_args else []):
self._alpha = array_ops.identity(alpha, name="alpha")
self._beta = array_ops.identity(beta, name="beta")
super(InverseGamma, self).__init__(
dtype=self._alpha.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
is_continuous=True,
is_reparameterized=False,
| tensorflow.python.ops.array_ops.identity | 397 |
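A minimal sketch of the identity trick above: it copies a tensor through a new op so the parameter carries a stable, well-defined name.

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops

alpha = array_ops.identity(constant_op.constant(2.0), name="alpha")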
import tensorflow as tf
def get_assignment_map_from_checkpoint(tvars, init_checkpoint, num_of_group=0):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
init_vars_name = [name for (name, _) in init_vars]
if num_of_group > 0:
assignment_map = []
for gid in range(num_of_group):
assignment_map.append(collections.OrderedDict())
else:
assignment_map = collections.OrderedDict()
for name in name_to_variable:
if name in init_vars_name:
tvar_name = name
elif (re.sub(r"/group_\d+/", "/group_0/",
| tensorflow.train.list_variables | 398 |
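A minimal sketch of list_variables on its own (checkpoint path illustrative): it yields (name, shape) pairs without loading any values.

import tensorflow as tf

for name, shape in tf.train.list_variables("/tmp/model.ckpt"):
    print(name, shape)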
from tensorflow.python.ops import math_ops
Args:
required_size: number or tf.Tensor specifying required array capacity.
growth_factor: optional number or tf.Tensor specifying the growth factor
between subsequent allocations.
Returns:
tf.Tensor with dtype=int32 giving the next array size.
"""
exponent = math_ops.ceil(
math_ops.log(math_ops.cast(required_size, dtypes.float32))
/ math_ops.log(math_ops.cast(growth_factor, dtypes.float32)))
return math_ops.cast(math_ops.ceil(growth_factor ** exponent), dtypes.int32)
def streaming_concat(values,
axis=0,
max_size=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Concatenate values along an axis across batches.
The function `streaming_concat` creates two local variables, `array` and
| tensorflow.python.ops.math_ops.ceil | 399 |
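The growth rule above, replayed with plain Python floats for illustration (numbers are illustrative): the next size is the smallest power of growth_factor at least as large as required_size, rounded up to an integer.

import math

required_size, growth_factor = 100, 1.5
exponent = math.ceil(math.log(required_size) / math.log(growth_factor))
next_size = int(math.ceil(growth_factor ** exponent))  # 12 steps -> 130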