# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import ChannelShuffle
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
class OldChannelShuffle(BaseImageAugmentationLayer):
"""Shuffle channels of an input image.
Input shape:
Expected images should have pixel values in the `[0, 255]` range.
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
groups: Number of groups to divide the input channels into, defaults to 3.
seed: Integer. Used to create a random seed.
Call arguments:
inputs: Tensor representing images of shape
`(batch_size, height, width, channels)` or
`(height, width, channels)`, with dtype
tf.float32 / tf.uint8
training: A boolean argument that determines whether the call should be
run in inference mode or training mode, defaults to True.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
channel_shuffle = keras_cv.layers.ChannelShuffle()
augmented_images = channel_shuffle(images)
```
"""
def __init__(self, groups=3, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.groups = groups
self.seed = seed
def augment_image(self, image, transformation=None, **kwargs):
shape = tf.shape(image)
height, width = shape[0], shape[1]
num_channels = image.shape[2]
if num_channels % self.groups != 0:
raise ValueError(
"The number of input channels should be "
"divisible by the number of groups. "
f"Received: channels={num_channels}, groups={self.groups}"
)
channels_per_group = num_channels // self.groups
image = tf.reshape(
image, [height, width, self.groups, channels_per_group]
)
image = tf.transpose(image, perm=[2, 0, 1, 3])
image = tf.random.shuffle(image, seed=self.seed)
image = tf.transpose(image, perm=[1, 2, 3, 0])
image = tf.reshape(image, [height, width, num_channels])
return image
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update({"groups": self.groups, "seed": self.seed})
return config
def compute_output_shape(self, input_shape):
return input_shape
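# A minimal, standalone sketch (illustrative only) of the channel-shuffle
# math used in `OldChannelShuffle.augment_image` above: split the channel
# axis into `groups`, shuffle the groups, and merge them back. The helper
# name is hypothetical; the image is assumed to be (height, width, channels)
# with channels divisible by groups.
def _channel_shuffle_sketch(image, groups=3, seed=None):
    height, width = tf.shape(image)[0], tf.shape(image)[1]
    num_channels = image.shape[-1]
    channels_per_group = num_channels // groups
    # (H, W, C) -> (groups, H, W, C // groups)
    x = tf.reshape(image, [height, width, groups, channels_per_group])
    x = tf.transpose(x, perm=[2, 0, 1, 3])
    # Shuffle along the group axis, then restore the (H, W, C) layout.
    x = tf.random.shuffle(x, seed=seed)
    x = tf.transpose(x, perm=[1, 2, 3, 0])
    return tf.reshape(x, [height, width, num_channels])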
class ChannelShuffleTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
image_shape = (1, 32, 32, 3)
groups = 3
fixed_seed = 2023 # magic number
image = tf.random.uniform(shape=image_shape)
layer = ChannelShuffle(groups=groups, seed=fixed_seed)
old_layer = OldChannelShuffle(groups=groups, seed=fixed_seed)
output = layer(image)
old_output = old_layer(image)
self.assertNotAllClose(image, output)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 3000, 4000, 5000, 10000]
results = {}
aug_candidates = [ChannelShuffle, OldChannelShuffle]
aug_args = {"groups": 3}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
# --- End of file: keras-cv/benchmarks/vectorized_channel_shuffle.py (repo: keras-cv) ---
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import core
from keras_cv.layers import RandomlyZoomedCrop
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomlyZoomedCrop(BaseImageAugmentationLayer):
"""Randomly crops a part of an image and zooms it by a provided amount size.
This implementation takes a distortion-oriented approach, which means the
amount of distortion in the image is proportional to the `zoom_factor`
argument. To do this, we first sample random values for `zoom_factor` and
`aspect_ratio_factor`. From these we deduce a `crop_size` that abides by the
sampled aspect ratio. Finally, we crop that region and resize it to
`(height, width)`. (A small numeric sketch of the crop-size math follows
this class definition.)
Args:
height: The height of the output shape.
width: The width of the output shape.
zoom_factor: A tuple of two floats, ConstantFactorSampler or
UniformFactorSampler. Represents the area relative to the original
image of the cropped image before resizing it to `(height, width)`.
aspect_ratio_factor: A tuple of two floats, ConstantFactorSampler or
UniformFactorSampler. Aspect ratio means the ratio of width to
height of the cropped image. In the context of this layer, the
aspect ratio sampled represents a value to distort the aspect ratio
by.
Represents the lower and upper bound for the aspect ratio of the
cropped image before resizing it to `(height, width)`. For most
tasks, this should be `(3/4, 4/3)`. To perform a no-op provide the
value `(1.0, 1.0)`.
interpolation: (Optional) A string specifying the sampling method for
resizing, defaults to "bilinear".
seed: (Optional) Used to create a random seed, defaults to None.
"""
def __init__(
self,
height,
width,
zoom_factor,
aspect_ratio_factor,
interpolation="bilinear",
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height = height
self.width = width
self.aspect_ratio_factor = preprocessing_utils.parse_factor(
aspect_ratio_factor,
min_value=0.0,
max_value=None,
param_name="aspect_ratio_factor",
seed=seed,
)
self.zoom_factor = preprocessing_utils.parse_factor(
zoom_factor,
min_value=0.0,
max_value=None,
param_name="zoom_factor",
seed=seed,
)
self._check_class_arguments(
height, width, zoom_factor, aspect_ratio_factor
)
self.force_output_dense_images = True
self.interpolation = interpolation
self.seed = seed
def get_random_transformation(
self, image=None, label=None, bounding_box=None, **kwargs
):
zoom_factor = self.zoom_factor()
aspect_ratio = self.aspect_ratio_factor()
original_height = tf.cast(tf.shape(image)[-3], tf.float32)
original_width = tf.cast(tf.shape(image)[-2], tf.float32)
crop_size = (
tf.round(self.height / zoom_factor),
tf.round(self.width / zoom_factor),
)
new_height = crop_size[0] / tf.sqrt(aspect_ratio)
new_width = crop_size[1] * tf.sqrt(aspect_ratio)
height_offset = self._random_generator.uniform(
(),
minval=tf.minimum(0.0, original_height - new_height),
maxval=tf.maximum(0.0, original_height - new_height),
dtype=tf.float32,
)
width_offset = self._random_generator.uniform(
(),
minval=tf.minimum(0.0, original_width - new_width),
maxval=tf.maximum(0.0, original_width - new_width),
dtype=tf.float32,
)
new_height = new_height / original_height
new_width = new_width / original_width
height_offset = height_offset / original_height
width_offset = width_offset / original_width
return (new_height, new_width, height_offset, width_offset)
def call(self, inputs, training=True):
if training:
return super().call(inputs, training)
else:
inputs = self._ensure_inputs_are_compute_dtype(inputs)
inputs, meta_data = self._format_inputs(inputs)
output = inputs
# self._resize() returns valid results for both batched and
# unbatched
output["images"] = self._resize(inputs["images"])
return self._format_output(output, meta_data)
def augment_image(self, image, transformation, **kwargs):
image_shape = tf.shape(image)
height = tf.cast(image_shape[-3], tf.float32)
width = tf.cast(image_shape[-2], tf.float32)
image = tf.expand_dims(image, axis=0)
new_height, new_width, height_offset, width_offset = transformation
transform = OldRandomlyZoomedCrop._format_transform(
[
new_width,
0.0,
width_offset * width,
0.0,
new_height,
height_offset * height,
0.0,
0.0,
]
)
image = preprocessing_utils.transform(
images=image,
transforms=transform,
output_shape=(self.height, self.width),
interpolation=self.interpolation,
fill_mode="reflect",
)
return tf.squeeze(image, axis=0)
@staticmethod
def _format_transform(transform):
transform = tf.convert_to_tensor(transform, dtype=tf.float32)
return transform[tf.newaxis]
def _resize(self, image):
outputs = keras.preprocessing.image.smart_resize(
image, (self.height, self.width)
)
# smart_resize will always output float32, so we need to re-cast.
return tf.cast(outputs, self.compute_dtype)
def _check_class_arguments(
self, height, width, zoom_factor, aspect_ratio_factor
):
if not isinstance(height, int):
raise ValueError(
"`height` must be an integer. Received height={height}"
)
if not isinstance(width, int):
raise ValueError(
"`width` must be an integer. Received width={width}"
)
if (
not isinstance(zoom_factor, (tuple, list, core.FactorSampler))
or isinstance(zoom_factor, float)
or isinstance(zoom_factor, int)
):
raise ValueError(
"`zoom_factor` must be tuple of two positive floats"
" or keras_cv.core.FactorSampler instance. Received "
f"zoom_factor={zoom_factor}"
)
if (
not isinstance(
aspect_ratio_factor, (tuple, list, core.FactorSampler)
)
or isinstance(aspect_ratio_factor, float)
or isinstance(aspect_ratio_factor, int)
):
raise ValueError(
"`aspect_ratio_factor` must be tuple of two positive floats or "
"keras_cv.core.FactorSampler instance. Received "
f"aspect_ratio_factor={aspect_ratio_factor}"
)
def augment_target(self, augment_target, **kwargs):
return augment_target
def get_config(self):
config = super().get_config()
config.update(
{
"height": self.height,
"width": self.width,
"zoom_factor": self.zoom_factor,
"aspect_ratio_factor": self.aspect_ratio_factor,
"interpolation": self.interpolation,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
if isinstance(config["zoom_factor"], dict):
config["zoom_factor"] = keras.utils.deserialize_keras_object(
config["zoom_factor"]
)
if isinstance(config["aspect_ratio_factor"], dict):
config["aspect_ratio_factor"] = (
keras.utils.deserialize_keras_object(
config["aspect_ratio_factor"]
)
)
return cls(**config)
def _crop_and_resize(self, image, transformation, method=None):
image = tf.expand_dims(image, axis=0)
boxes = transformation
# See bit.ly/tf_crop_resize for more details
augmented_image = tf.image.crop_and_resize(
image, # image shape: [B, H, W, C]
boxes, # boxes: (1, 4) in this case; represents area
# to be cropped from the original image
[0], # box_indices: maps boxes to images along batch axis
# [0] since there is only one image
(self.height, self.width), # output size
method=method or self.interpolation,
)
return tf.squeeze(augmented_image, axis=0)
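# A small, hedged sketch (illustrative only) of the crop-size math used in
# `OldRandomlyZoomedCrop.get_random_transformation` above: the crop is
# `1 / zoom_factor` times the target (height, width), and the sampled aspect
# ratio then distorts the crop's height and width. The helper name and the
# example numbers are assumptions made for illustration.
def _zoomed_crop_size_sketch(height, width, zoom_factor, aspect_ratio):
    crop_height = tf.round(height / zoom_factor)
    crop_width = tf.round(width / zoom_factor)
    # Distort the crop by the sampled aspect ratio (width / height).
    new_height = crop_height / tf.sqrt(aspect_ratio)
    new_width = crop_width * tf.sqrt(aspect_ratio)
    return new_height, new_width

# For example, a 32x32 target with zoom_factor=0.8 and aspect_ratio=1.0
# yields a 40x40 crop from the source image before resizing back down.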
class RandomlyZoomedCropTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
image_shape = (1, 64, 64, 3)
height, width = 32, 32
fixed_zoom_factor = (0.8, 0.8)
fixed_aspect_ratio_factor = (3.0 / 4.0, 3.0 / 4.0)
fixed_seed = 2023
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomlyZoomedCrop(
height,
width,
fixed_zoom_factor,
fixed_aspect_ratio_factor,
seed=fixed_seed,
)
old_layer = OldRandomlyZoomedCrop(
height,
width,
fixed_zoom_factor,
fixed_aspect_ratio_factor,
seed=fixed_seed,
)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomlyZoomedCrop, OldRandomlyZoomedCrop]
aug_args = {
"height": 16,
"width": 16,
"zoom_factor": (0.8, 1.2),
"aspect_ratio_factor": (3.0 / 4.0, 4.0 / 3.0),
}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.raw_ops.ImageProjectiveTransformV3 on XLA
# for more information please refer:
# https://github.com/tensorflow/tensorflow/issues/55194
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
# --- End of file: keras-cv/benchmarks/vectorized_randomly_zoomed_crop.py (repo: keras-cv) ---
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
random_crop_and_resize_demo.py shows how to use the RandomCropAndResize
preprocessing layer for object detection.
"""
import demo_utils
import tensorflow as tf
from keras_cv.layers import preprocessing
IMG_SIZE = (256, 256)
def main():
dataset = demo_utils.load_voc_dataset(bounding_box_format="rel_xyxy")
random_crop_and_resize = preprocessing.RandomCropAndResize(
target_size=IMG_SIZE,
crop_area_factor=(0.5, 0.5),
aspect_ratio_factor=(0.5, 0.5),
bounding_box_format="rel_xyxy",
)
result = dataset.map(random_crop_and_resize, num_parallel_calls=tf.data.AUTOTUNE)
demo_utils.visualize_data(result, bounding_box_format="rel_xyxy")
if __name__ == "__main__":
main()
# --- End of file: keras-cv/examples/layers/preprocessing/bounding_box/random_crop_and_resize_demo.py (repo: keras-cv) ---
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow as tf
from absl import flags
from tensorflow import keras
from tensorflow.keras import callbacks
from tensorflow.keras import layers
from tensorflow.keras import metrics
from tensorflow.keras import optimizers
from keras_cv import losses
from keras_cv import models
from keras_cv import training
from keras_cv.datasets import imagenet
flags.DEFINE_string(
"model_name", None, "The name of the model in KerasCV.models to use."
)
flags.DEFINE_string(
"imagenet_path", None, "Directory from which to load Imagenet."
)
flags.DEFINE_string(
"backup_path", None, "Directory which will be used for training backups."
)
flags.DEFINE_string(
"weights_path",
None,
"Directory which will be used to store weight checkpoints.",
)
flags.DEFINE_string(
"tensorboard_path",
None,
"Directory which will be used to store tensorboard logs.",
)
flags.DEFINE_integer(
"batch_size", 256, "Batch size for training and evaluation."
)
flags.DEFINE_boolean(
"use_xla", True, "whether to use XLA (jit_compile) for training."
)
flags.DEFINE_float(
"initial_learning_rate",
0.1,
"Initial learning rate which will reduce on plateau.",
)
flags.DEFINE_boolean(
"include_probe",
True,
"Whether to include probing during training.",
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
if FLAGS.model_name not in models.__dict__:
raise ValueError(f"Invalid model name: {FLAGS.model_name}")
NUM_CLASSES = 1000
IMAGE_SIZE = (224, 224)
EPOCHS = 250
train_ds = imagenet.load(
split="train",
tfrecord_path=FLAGS.imagenet_path,
batch_size=FLAGS.batch_size,
img_size=IMAGE_SIZE,
shuffle=True,
shuffle_buffer=2000,
reshuffle_each_iteration=True,
)
# For TPU training, use tf.distribute.TPUStrategy()
# MirroredStrategy is best for a single machine with multiple GPUs
strategy = tf.distribute.MirroredStrategy()
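# A hedged sketch (not executed here): on a Cloud TPU VM, the
# MirroredStrategy above would typically be replaced with something
# along these lines:
#
#   resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
#   tf.config.experimental_connect_to_cluster(resolver)
#   tf.tpu.experimental.initialize_tpu_system(resolver)
#   strategy = tf.distribute.TPUStrategy(resolver)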
with strategy.scope():
model = models.__dict__[FLAGS.model_name]
model = model(
include_rescaling=True,
include_top=False,
input_shape=IMAGE_SIZE + (3,),
pooling="avg",
)
trainer = training.SimCLRTrainer(
encoder=model,
augmenter=training.SimCLRAugmenter(
value_range=(0, 255), target_size=IMAGE_SIZE
),
probe=layers.Dense(NUM_CLASSES, name="linear_probe"),
)
optimizer = optimizers.SGD(
learning_rate=FLAGS.initial_learning_rate,
momentum=0.9,
global_clipnorm=10,
)
loss_fn = losses.SimCLRLoss(temperature=0.5, reduction="none")
probe_loss = keras.losses.CategoricalCrossentropy(
reduction="none", from_logits=True
)
with strategy.scope():
training_metrics = [
metrics.CategoricalAccuracy(name="probe_accuracy"),
metrics.TopKCategoricalAccuracy(name="probe_top5_accuracy", k=5),
]
training_callbacks = [
callbacks.EarlyStopping(monitor="probe_accuracy", patience=20),
callbacks.BackupAndRestore(FLAGS.backup_path),
callbacks.ModelCheckpoint(FLAGS.weights_path, save_weights_only=True),
callbacks.TensorBoard(log_dir=FLAGS.tensorboard_path),
]
if FLAGS.include_probe:
training_callbacks += [
callbacks.ReduceLROnPlateau(
monitor="probe_accuracy",
factor=0.1,
patience=5,
min_lr=0.0001,
min_delta=0.005,
)
]
trainer.compile(
encoder_optimizer=optimizer,
encoder_loss=loss_fn,
probe_optimizer=optimizers.Adam(global_clipnorm=10),
probe_metrics=training_metrics,
probe_loss=probe_loss,
jit_compile=FLAGS.use_xla,
)
trainer.fit(
train_ds,
epochs=EPOCHS,
callbacks=training_callbacks,
)
# --- End of file: keras-cv/examples/training/contrastive/imagenet/simclr_training.py (repo: keras-cv) ---
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random as python_random
from keras_cv.backend import keras
from keras_cv.backend.config import keras_3
if keras_3():
from keras.random import * # noqa: F403, F401
else:
from keras_core.random import * # noqa: F403, F401
def _make_default_seed():
return python_random.randint(1, int(1e9))
class SeedGenerator:
def __new__(cls, seed=None, **kwargs):
if keras_3():
return keras.random.SeedGenerator(seed=seed, **kwargs)
return super().__new__(cls)
def __init__(self, seed=None):
if seed is None:
seed = _make_default_seed()
self._initial_seed = seed
self._current_seed = [0, seed]
def next(self, ordered=True):
self._current_seed[0] += 1
return self._current_seed[:]
def get_config(self):
return {"seed": self._initial_seed}
@classmethod
def from_config(cls, config):
return cls(**config)
def _draw_seed(seed):
if keras_3():
# Keras 3 seed can be directly passed to random functions
return seed
if isinstance(seed, SeedGenerator):
init_seed = seed.next()
else:
if seed is None:
seed = _make_default_seed()
init_seed = [0, seed]
return init_seed
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
seed = _draw_seed(seed)
kwargs = {}
if dtype:
kwargs["dtype"] = dtype
if keras_3():
return keras.random.normal(
shape,
mean=mean,
stddev=stddev,
seed=seed,
**kwargs,
)
else:
import tensorflow as tf
return tf.random.stateless_normal(
shape,
mean=mean,
stddev=stddev,
seed=seed,
**kwargs,
)
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
init_seed = _draw_seed(seed)
kwargs = {}
if dtype:
kwargs["dtype"] = dtype
if keras_3():
return keras.random.uniform(
shape,
minval=minval,
maxval=maxval,
seed=init_seed,
**kwargs,
)
else:
import tensorflow as tf
return tf.random.stateless_uniform(
shape,
minval=minval,
maxval=maxval,
seed=init_seed,
**kwargs,
)
def shuffle(x, axis=0, seed=None):
init_seed = _draw_seed(seed)
if keras_3():
return keras.random.shuffle(x=x, axis=axis, seed=init_seed)
else:
import tensorflow as tf
return tf.random.stateless_shuffle(x=x, axis=axis, seed=init_seed)
def categorical(logits, num_samples, dtype=None, seed=None):
init_seed = _draw_seed(seed)
kwargs = {}
if dtype:
kwargs["dtype"] = dtype
if keras_3():
return keras.random.categorical(
logits=logits,
num_samples=num_samples,
seed=init_seed,
**kwargs,
)
else:
import tensorflow as tf
return tf.random.stateless_categorical(
logits=logits,
num_samples=num_samples,
seed=init_seed,
**kwargs,
)
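# A minimal sketch (illustrative only) of how the wrappers above are
# typically used: a single `SeedGenerator` is shared across calls so that
# every call draws a fresh, reproducible seed on both the Keras 2 and
# Keras 3 code paths. The helper name is hypothetical.
def _random_usage_sketch():
    seed_gen = SeedGenerator(seed=1337)
    noise = normal(shape=(2, 3), mean=0.0, stddev=1.0, seed=seed_gen)
    factors = uniform(shape=(2, 3), minval=0.0, maxval=1.0, seed=seed_gen)
    return noise, factors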
# --- End of file: keras-cv/keras_cv/backend/random.py (repo: keras-cv) ---
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import keras_cv.bounding_box.validate_format as validate_format
from keras_cv import backend
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
@keras_cv_export("keras_cv.bounding_box.to_ragged")
def to_ragged(bounding_boxes, sentinel=-1, dtype=tf.float32):
"""converts a Dense padded bounding box `tf.Tensor` to a `tf.RaggedTensor`.
Bounding boxes are ragged tensors in most use cases. Converting them to a
dense tensor makes it easier to work with Tensorflow ecosystem.
This function can be used to filter out the masked out bounding boxes by
checking for padded sentinel value of the class_id axis of the
bounding_boxes.
Usage:
```python
bounding_boxes = {
"boxes": tf.constant([[2, 3, 4, 5], [0, 1, 2, 3]]),
"classes": tf.constant([[-1, 1]]),
}
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
print(bounding_boxes)
# {
# "boxes": [[0, 1, 2, 3]],
# "classes": [[1]]
# }
```
Args:
bounding_boxes: a Tensor of bounding boxes. May be batched, or
unbatched.
sentinel: The value indicating that a bounding box does not exist at the
current index, and the corresponding box is padding, defaults to -1.
dtype: the data type to use for the underlying Tensors.
Returns:
dictionary of `tf.RaggedTensor` or `tf.Tensor` containing the filtered
bounding boxes.
"""
if backend.supports_ragged() is False:
raise NotImplementedError(
"`bounding_box.to_ragged` was called using a backend which does "
"not support ragged tensors. "
f"Current backend: {keras.backend.backend()}."
)
info = validate_format.validate_format(bounding_boxes)
if info["ragged"]:
return bounding_boxes
boxes = bounding_boxes.get("boxes")
classes = bounding_boxes.get("classes")
confidence = bounding_boxes.get("confidence", None)
mask = classes != sentinel
boxes = tf.ragged.boolean_mask(boxes, mask)
classes = tf.ragged.boolean_mask(classes, mask)
if confidence is not None:
confidence = tf.ragged.boolean_mask(confidence, mask)
if isinstance(boxes, tf.Tensor):
boxes = tf.RaggedTensor.from_tensor(boxes)
if isinstance(classes, tf.Tensor) and len(classes.shape) > 1:
classes = tf.RaggedTensor.from_tensor(classes)
if confidence is not None:
if isinstance(confidence, tf.Tensor) and len(confidence.shape) > 1:
confidence = tf.RaggedTensor.from_tensor(confidence)
result = bounding_boxes.copy()
result["boxes"] = tf.cast(boxes, dtype)
result["classes"] = tf.cast(classes, dtype)
if confidence is not None:
result["confidence"] = tf.cast(confidence, dtype)
return result
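# A small, hedged demonstration (illustrative only) of the round trip: a
# batch padded with the sentinel class -1 is converted back to ragged
# tensors, dropping the padded boxes. The helper name is hypothetical, and
# this only runs on backends that support ragged tensors.
def _to_ragged_demo():
    padded = {
        "boxes": tf.constant(
            [[[0.0, 1.0, 2.0, 3.0], [-1.0, -1.0, -1.0, -1.0]]]
        ),
        "classes": tf.constant([[5.0, -1.0]]),
    }
    ragged = to_ragged(padded)
    # ragged["boxes"] keeps one box for the single image; the padded entry
    # is dropped because its class equals the sentinel value.
    return ragged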
# --- End of file: keras-cv/keras_cv/bounding_box/to_ragged.py (repo: keras-cv) ---
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer to convert Waymo Open Dataset proto to model inputs."""
from typing import Any
from typing import Dict
from typing import List
from typing import Sequence
from typing import Tuple
import numpy as np
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.utils import assert_waymo_open_dataset_installed
try:
from waymo_open_dataset import dataset_pb2
from waymo_open_dataset.utils import box_utils
from waymo_open_dataset.utils import frame_utils
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
except ImportError:
waymo_open_dataset = None
from keras_cv.datasets.waymo import struct
from keras_cv.layers.object_detection_3d import voxel_utils
WOD_FRAME_OUTPUT_SIGNATURE = {
"frame_id": tf.TensorSpec((), tf.int64),
"timestamp_offset": tf.TensorSpec((), tf.float32),
"timestamp_micros": tf.TensorSpec((), tf.int64),
"pose": tf.TensorSpec([4, 4], tf.float32),
"point_xyz": tf.TensorSpec([None, 3], tf.float32),
"point_feature": tf.TensorSpec([None, 4], tf.float32),
"point_mask": tf.TensorSpec([None], tf.bool),
"point_range_image_row_col_sensor_id": tf.TensorSpec([None, 3], tf.float32),
# Please refer to Waymo Open Dataset label proto for definitions.
"label_box": tf.TensorSpec([None, 7], tf.float32),
"label_box_id": tf.TensorSpec([None], tf.int64),
"label_box_meta": tf.TensorSpec([None, 4], tf.float32),
"label_box_class": tf.TensorSpec([None], tf.int32),
"label_box_density": tf.TensorSpec([None], tf.int32),
"label_box_detection_difficulty": tf.TensorSpec([None], tf.int32),
"label_box_mask": tf.TensorSpec([None], tf.bool),
"label_point_class": tf.TensorSpec([None], tf.int32),
"label_point_nlz": tf.TensorSpec([None], tf.int32),
}
# Maximum number of points from all lidars excluding the top lidar. Please refer
# to https://arxiv.org/pdf/1912.04838.pdf Figure 1 for sensor layouts.
_MAX_NUM_NON_TOP_LIDAR_POINTS = 30000
def _decode_range_images(frame) -> Dict[int, List[tf.Tensor]]:
"""Decodes range images from a Waymo Open Dataset frame.
Please refer to https://arxiv.org/pdf/1912.04838.pdf for more details.
Args:
frame: a Waymo Open Dataset frame.
Returns:
A dictionary mapping from sensor ID to list of range images ordered by
return indices.
"""
range_images = {}
for lidar in frame.lasers:
range_image_str_tensor = tf.io.decode_compressed(
lidar.ri_return1.range_image_compressed, "ZLIB"
)
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
ri_tensor = tf.reshape(
tf.convert_to_tensor(value=ri.data, dtype=tf.float32), ri.shape.dims
)
range_images[lidar.name] = [ri_tensor]
if lidar.name == dataset_pb2.LaserName.TOP:
range_image_str_tensor = tf.io.decode_compressed(
lidar.ri_return2.range_image_compressed, "ZLIB"
)
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
ri_tensor = tf.reshape(
tf.convert_to_tensor(value=ri.data, dtype=tf.float32),
ri.shape.dims,
)
range_images[lidar.name].append(ri_tensor)
return range_images
def _get_range_image_top_pose(frame) -> tf.Tensor:
"""Extracts range image pose tensor.
Args:
frame: a Waymo Open Dataset frame.
Returns:
Pose tensors for the range image.
"""
_, _, _, ri_pose = frame_utils.parse_range_image_and_camera_projection(
frame
)
assert ri_pose
ri_pose_tensor = tf.reshape(
tf.convert_to_tensor(value=ri_pose.data), ri_pose.shape.dims
)
# [H, W, 3, 3]
ri_pose_tensor_rotation = transform_utils.get_rotation_matrix(
ri_pose_tensor[..., 0], ri_pose_tensor[..., 1], ri_pose_tensor[..., 2]
)
ri_pose_tensor_translation = ri_pose_tensor[..., 3:]
ri_pose_tensor = transform_utils.get_transform(
ri_pose_tensor_rotation, ri_pose_tensor_translation
)
return ri_pose_tensor
def _get_point_top_lidar(
range_image: Sequence[tf.Tensor], frame
) -> struct.PointTensors:
"""Gets point related tensors for the top lidar.
Please refer to https://arxiv.org/pdf/1912.04838.pdf Table 2 for lidar
specifications.
Args:
range_image: range image tensors. The range image is:
[range, intensity, elongation, is_in_nlz].
frame: a Waymo Open Dataset frame.
Returns:
Point tensors.
"""
assert len(range_image) == 2
xyz_list = []
feature_list = []
row_col_list = []
nlz_list = []
has_second_return_list = []
is_second_return_list = []
# Extracts frame pose tensor.
frame_pose_tensor = tf.convert_to_tensor(
value=np.reshape(np.array(frame.pose.transform), [4, 4])
)
# Extracts range image pose tensor.
ri_pose_tensor = _get_range_image_top_pose(frame)
# Extracts calibration data.
calibration = _get_lidar_calibration(frame, dataset_pb2.LaserName.TOP)
extrinsic = tf.reshape(np.array(calibration.extrinsic.transform), [4, 4])
beam_inclinations = tf.constant(calibration.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
for i in range(2):
ri_tensor = range_image[i]
mask = ri_tensor[:, :, 0] > 0
mask_idx = tf.cast(tf.where(mask), dtype=tf.int32)
xyz = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(ri_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(beam_inclinations, axis=0),
pixel_pose=tf.expand_dims(ri_pose_tensor, axis=0),
frame_pose=tf.expand_dims(frame_pose_tensor, axis=0),
)
xyz = tf.gather_nd(tf.squeeze(xyz, axis=0), mask_idx)
feature = tf.gather_nd(ri_tensor[:, :, 1:3], mask_idx)
nlz = tf.gather_nd(ri_tensor[:, :, -1] > 0, mask_idx)
xyz_list.append(xyz)
feature_list.append(feature)
nlz_list.append(nlz)
row_col_list.append(mask_idx)
if i == 0:
has_second_return = range_image[1][:, :, 0] > 0
has_second_return_list.append(
tf.gather_nd(has_second_return, mask_idx)
)
is_second_return_list.append(
tf.zeros([mask_idx.shape[0]], dtype=tf.bool)
)
else:
has_second_return_list.append(
tf.zeros([mask_idx.shape[0]], dtype=tf.bool)
)
is_second_return_list.append(
tf.ones([mask_idx.shape[0]], dtype=tf.bool)
)
xyz = tf.concat(xyz_list, axis=0)
feature = tf.concat(feature_list, axis=0)
row_col = tf.concat(row_col_list, axis=0)
nlz = tf.concat(nlz_list, axis=0)
has_second_return = tf.cast(
tf.concat(has_second_return_list, axis=0), dtype=tf.float32
)
is_second_return = tf.cast(
tf.concat(is_second_return_list, axis=0), dtype=tf.float32
)
# Complete feature: intensity, elongation, has_second, is_second.
feature = tf.concat(
[
feature,
has_second_return[:, tf.newaxis],
is_second_return[:, tf.newaxis],
],
axis=-1,
)
sensor_id = (
tf.ones([xyz.shape[0], 1], dtype=tf.int32) * dataset_pb2.LaserName.TOP
)
ri_row_col_sensor_id = tf.concat([row_col, sensor_id], axis=-1)
return struct.PointTensors(
point_xyz=xyz,
point_feature=feature,
point_range_image_row_col_sensor_id=ri_row_col_sensor_id,
label_point_nlz=nlz,
)
def _get_lidar_calibration(frame, name: int):
"""Gets lidar calibration for a given lidar."""
calibration = None
for c in frame.context.laser_calibrations:
if c.name == name:
calibration = c
assert calibration is not None
return calibration
def _downsample(point: struct.PointTensors, n: int) -> struct.PointTensors:
"""Randomly samples up to n points from the given point_tensor."""
num_points = point.point_xyz.shape[0]
if num_points <= n:
return point
mask = tf.range(start=0, limit=num_points, dtype=tf.int32)
mask = tf.random.shuffle(mask)
mask_index = mask[:n]
def _gather(t: tf.Tensor) -> tf.Tensor:
return tf.gather(t, mask_index)
tensors = {key: _gather(value) for key, value in vars(point).items()}
return struct.PointTensors(**tensors)
def _get_point_lidar(
ris: Dict[int, List[tf.Tensor]],
frame,
max_num_points: int,
) -> struct.PointTensors:
"""Gets point related tensors for non-top lidar.
The main differences from top lidar extraction are related to second return
and point down sampling.
Args:
ris: Mapping from lidar ID to range image tensor. The ri format is [range,
intensity, elongation, is_in_nlz].
frame: a Waymo Open Dataset frame.
max_num_points: maximum number of points from non-top lidar.
Returns:
Point related tensors.
"""
xyz_list = []
feature_list = []
nlz_list = []
ri_row_col_sensor_id_list = []
for sensor_id in ris.keys():
ri_tensor = ris[sensor_id]
assert len(ri_tensor) == 1, f"{sensor_id}"
ri_tensor = ri_tensor[0]
calibration = _get_lidar_calibration(frame, sensor_id)
extrinsic = tf.reshape(
np.array(calibration.extrinsic.transform), [4, 4]
)
beam_inclinations = range_image_utils.compute_inclination(
tf.constant(
[
calibration.beam_inclination_min,
calibration.beam_inclination_max,
]
),
height=ri_tensor.shape[0],
)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
xyz = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(ri_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(beam_inclinations, axis=0),
)
mask = ri_tensor[:, :, 0] > 0
mask_idx = tf.cast(tf.where(mask), dtype=tf.int32)
xyz = tf.gather_nd(tf.squeeze(xyz, axis=0), mask_idx)
feature = tf.gather_nd(ri_tensor[:, :, 1:3], mask_idx)
feature = tf.concat(
[feature, tf.zeros([feature.shape[0], 2], dtype=tf.float32)],
axis=-1,
)
nlz = tf.gather_nd(ri_tensor[:, :, -1] > 0, mask_idx)
xyz_list.append(xyz)
feature_list.append(feature)
nlz_list.append(nlz)
ri_row_col_sensor_id_list.append(
tf.concat(
[
mask_idx,
sensor_id * tf.ones([nlz.shape[0], 1], dtype=tf.int32),
],
axis=-1,
)
)
xyz = tf.concat(xyz_list, axis=0)
feature = tf.concat(feature_list, axis=0)
nlz = tf.concat(nlz_list, axis=0)
ri_row_col_sensor_id = tf.concat(ri_row_col_sensor_id_list, axis=0)
point_tensors = struct.PointTensors(
point_xyz=xyz,
point_feature=feature,
point_range_image_row_col_sensor_id=ri_row_col_sensor_id,
label_point_nlz=nlz,
)
point_tensors = _downsample(point_tensors, max_num_points)
return point_tensors
def _get_point(frame, max_num_lidar_points: int) -> struct.PointTensors:
"""Gets point related tensors from a Waymo Open Dataset frame.
Args:
frame: a Waymo Open Dataset frame.
max_num_lidar_points: maximum number of points from non-top lidars.
Returns:
Point related tensors.
"""
range_images = _decode_range_images(frame)
point_top_lidar = _get_point_top_lidar(
range_images[dataset_pb2.LaserName.TOP], frame
)
range_images.pop(dataset_pb2.LaserName.TOP)
point_tensors_lidar = _get_point_lidar(
range_images, frame, max_num_lidar_points
)
merged = {}
for key in vars(point_tensors_lidar).keys():
merged[key] = tf.concat(
[getattr(point_tensors_lidar, key), getattr(point_top_lidar, key)],
axis=0,
)
return struct.PointTensors(**merged)
def _get_point_label_box(
frame,
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Extracts 3D box labels from a Waymo Open Dataset frame.
Args:
frame: a Waymo Open Dataset frame.
Returns:
box_3d: [M, 7] 3d boxes.
box_meta: [M, 4] speed and accel for each box.
box_class: [M] object class of each box.
box_id: [M] unique ID of each box.
box_density: [M] number of points in each box.
box_detection_difficulty: [M] difficulty level for detection.
"""
box_3d_list = []
box_meta_list = []
box_class_list = []
box_id_list = []
box_density_list = []
box_detection_difficulty_list = []
for label in frame.laser_labels:
model_object_type = label.type
density = label.num_lidar_points_in_box
detection_difficulty = label.detection_difficulty_level
if model_object_type == 0:
continue
b = label.box
box_3d_list.extend(
[
b.center_x,
b.center_y,
b.center_z,
b.length,
b.width,
b.height,
b.heading,
]
)
meta = label.metadata
box_meta_list.extend(
[
meta.speed_x,
meta.speed_y,
meta.accel_x,
meta.accel_y,
]
)
box_class_list.append(model_object_type)
box_id = tf.bitcast(
tf.fingerprint(
tf.expand_dims(label.id.encode(encoding="ascii"), 0)
)[0],
tf.int64,
)
box_id_list.append(box_id)
box_density_list.append(density)
box_detection_difficulty_list.append(detection_difficulty)
box_3d = tf.reshape(tf.constant(box_3d_list, dtype=tf.float32), [-1, 7])
box_meta = tf.reshape(tf.constant(box_meta_list, dtype=tf.float32), [-1, 4])
box_class = tf.constant(box_class_list, dtype=tf.int32)
box_id = tf.stack(box_id_list)
box_density = tf.constant(box_density_list, dtype=tf.int32)
box_detection_difficulty = tf.constant(
box_detection_difficulty_list, dtype=tf.int32
)
return (
box_3d,
box_meta,
box_class,
box_id,
box_density,
box_detection_difficulty,
)
def _get_box_class_per_point(
box: tf.Tensor, box_class: tf.Tensor, point_xyz: tf.Tensor
) -> tf.Tensor:
"""Extracts point labels.
Args:
box: [M, 7] box tensor.
box_class: [M] class of each box.
point_xyz: [N, 3] points.
Returns:
point_box_class: [N] box class of each point.
"""
n = point_xyz.shape[0]
m = box.shape[0]
if m == 0:
return tf.zeros([n], dtype=tf.int32)
# [N, M]
point_in_box = box_utils.is_within_box_3d(point_xyz, box)
# [N]
point_in_any_box = tf.math.reduce_any(point_in_box, axis=-1)
# [N]
point_box_idx = tf.math.argmax(point_in_box, axis=-1, output_type=tf.int32)
# [N]
point_box_class = tf.where(
point_in_any_box, tf.gather(box_class, point_box_idx), 0
)
return point_box_class
def _get_point_label(frame, point_xyz: tf.Tensor) -> struct.LabelTensors:
"""Extracts labels.
Args:
frame: an open dataset frame.
point_xyz: [N, 3] tensor representing point xyz.
Returns:
Label tensors.
"""
(
box_3d,
box_meta,
box_class,
box_id,
box_density,
box_detection_difficulty,
) = _get_point_label_box(frame)
point_box_class = _get_box_class_per_point(box_3d, box_class, point_xyz)
box_mask = tf.math.greater(box_class, 0)
return struct.LabelTensors(
label_box=box_3d,
label_box_id=box_id,
label_box_meta=box_meta,
label_box_class=box_class,
label_box_density=box_density,
label_box_detection_difficulty=box_detection_difficulty,
label_box_mask=box_mask,
label_point_class=point_box_class,
)
def _point_vehicle_to_global(
point_vehicle_xyz: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms points from vehicle to global frame.
Args:
point_vehicle_xyz: [..., N, 3] vehicle xyz.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The points in global frame.
"""
rot = sdc_pose[..., 0:3, 0:3]
loc = sdc_pose[..., 0:3, 3]
return (
tf.linalg.matmul(point_vehicle_xyz, rot, transpose_b=True)
+ loc[..., tf.newaxis, :]
)
def _point_global_to_vehicle(
point_xyz: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms points from global to vehicle frame.
Args:
point_xyz: [..., N, 3] global xyz.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The points in vehicle frame.
"""
rot = sdc_pose[..., 0:3, 0:3]
loc = sdc_pose[..., 0:3, 3]
return (
tf.linalg.matmul(point_xyz, rot)
+ voxel_utils.inv_loc(rot, loc)[..., tf.newaxis, :]
)
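# A hedged sketch (illustrative only) of the two helpers above: mapping
# points from the vehicle frame to the global frame with an SDC pose and
# back again should recover the original coordinates (up to floating point
# error). The helper name and pose values are assumptions for illustration.
def _pose_round_trip_sketch():
    # A pose that rotates 90 degrees around z and translates by (10, 0, 0).
    pose = tf.constant(
        [
            [0.0, -1.0, 0.0, 10.0],
            [1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ]
    )
    points_vehicle = tf.constant([[1.0, 2.0, 3.0]])
    points_global = _point_vehicle_to_global(points_vehicle, pose)
    points_back = _point_global_to_vehicle(points_global, pose)
    return points_vehicle, points_back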
def _box_3d_vehicle_to_global(
box_3d: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms 3D boxes from vehicle to global frame.
Args:
box_3d: [..., N, 7] 3d boxes in vehicle frame.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The boxes in global frame.
"""
center = box_3d[..., 0:3]
dim = box_3d[..., 3:6]
heading = box_3d[..., 6]
new_center = _point_vehicle_to_global(center, sdc_pose)
new_heading = (
heading
+ tf.atan2(sdc_pose[..., 1, 0], sdc_pose[..., 0, 0])[..., tf.newaxis]
)
return tf.concat([new_center, dim, new_heading[..., tf.newaxis]], axis=-1)
def _box_3d_global_to_vehicle(
box_3d: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms 3D boxes from global to vehicle frame.
Args:
box_3d: [..., N, 7] 3d boxes in global frame.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The boxes in vehicle frame.
"""
center = box_3d[..., 0:3]
dim = box_3d[..., 3:6]
heading = box_3d[..., 6]
new_center = _point_global_to_vehicle(center, sdc_pose)
new_heading = (
heading
+ tf.atan2(sdc_pose[..., 0, 1], sdc_pose[..., 0, 0])[..., tf.newaxis]
)
return tf.concat([new_center, dim, new_heading[..., tf.newaxis]], axis=-1)
@keras_cv_export("keras_cv.datasets.waymo.build_tensors_from_wod_frame")
def build_tensors_from_wod_frame(frame) -> Dict[str, tf.Tensor]:
"""Builds tensors from a Waymo Open Dataset frame.
This function is to convert range image to point cloud. User can also work
with range image directly with frame_utils functions from
waymo_open_dataset.
Args:
frame: a Waymo Open Dataset frame.
Returns:
Flat dictionary of tensors.
"""
assert_waymo_open_dataset_installed(
"keras_cv.datasets.waymo.build_tensors_from_wod_frame()"
)
frame_id_bytes = "{}_{}".format(
frame.context.name, frame.timestamp_micros
).encode(encoding="ascii")
frame_id = tf.bitcast(
tf.fingerprint(tf.expand_dims(frame_id_bytes, 0))[0], tf.int64
)
timestamp_micros = tf.constant(frame.timestamp_micros, dtype=tf.int64)
pose = tf.convert_to_tensor(
value=np.reshape(np.array(frame.pose.transform), [4, 4]),
dtype_hint=tf.float32,
)
point_tensors = _get_point(frame, _MAX_NUM_NON_TOP_LIDAR_POINTS)
point_label_tensors = _get_point_label(frame, point_tensors.point_xyz)
# Transforms lidar frames to global coordinates.
point_tensors.point_xyz = _point_vehicle_to_global(
point_tensors.point_xyz, pose
)
point_label_tensors.label_box = _box_3d_vehicle_to_global(
point_label_tensors.label_box, pose
)
# Constructs final results.
num_points = point_tensors.point_xyz.shape[0]
return {
"frame_id": frame_id,
"timestamp_offset": tf.constant(0.0, dtype=tf.float32),
"timestamp_micros": timestamp_micros,
"pose": pose,
"point_xyz": point_tensors.point_xyz,
"point_feature": point_tensors.point_feature,
"point_mask": tf.ones([num_points], dtype=tf.bool),
"point_range_image_row_col_sensor_id": point_tensors.point_range_image_row_col_sensor_id, # noqa: E501
"label_box": point_label_tensors.label_box,
"label_box_id": point_label_tensors.label_box_id,
"label_box_meta": point_label_tensors.label_box_meta,
"label_box_class": point_label_tensors.label_box_class,
"label_box_density": point_label_tensors.label_box_density,
"label_box_detection_difficulty": point_label_tensors.label_box_detection_difficulty, # noqa: E501
"label_box_mask": point_label_tensors.label_box_mask,
"label_point_class": point_label_tensors.label_point_class,
"label_point_nlz": point_tensors.label_point_nlz,
}
@keras_cv_export("keras_cv.datasets.waymo.pad_or_trim_tensors")
def pad_or_trim_tensors(
frame: Dict[str, tf.Tensor], max_num_point=199600, max_num_label_box=1000
) -> Dict[str, tf.Tensor]:
"""Pad or trim tensors from a frame to have uniform shapes.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame.
max_num_point: maximum number of lidar points to process.
max_num_label_box: maximum number of label boxes to process.
Returns:
A dictionary of feature tensors with uniform shapes.
"""
def _pad_fn(t: tf.Tensor, max_counts: int) -> tf.Tensor:
shape = [max_counts] + t.shape.as_list()[1:]
return voxel_utils._pad_or_trim_to(t, shape)
point_tensor_keys = {
"point_xyz",
"point_feature",
"point_range_image_row_col_sensor_id",
"point_mask",
"label_point_class",
"label_point_nlz",
}
box_tensor_keys = {
"label_box",
"label_box_id",
"label_box_meta",
"label_box_class",
"label_box_density",
"label_box_detection_difficulty",
"label_box_mask",
}
for key in point_tensor_keys:
t = frame[key]
if t is not None:
frame[key] = _pad_fn(t, max_num_point)
for key in box_tensor_keys:
t = frame[key]
if t is not None:
frame[key] = _pad_fn(t, max_num_label_box)
return frame
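# A hedged sketch (illustrative only) of the pad-or-trim idea used above,
# written with plain TF ops; the real code delegates to
# voxel_utils._pad_or_trim_to. Trim the leading axis to `max_counts`, then
# zero-pad it back up if short. The helper name is hypothetical.
def _pad_or_trim_sketch(t, max_counts):
    t = t[:max_counts]
    pad_amount = max_counts - tf.shape(t)[0]
    zeros = tf.zeros(
        tf.concat([[pad_amount], tf.shape(t)[1:]], axis=0), dtype=t.dtype
    )
    return tf.concat([t, zeros], axis=0)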
@keras_cv_export("keras_cv.datasets.waymo.transform_to_vehicle_frame")
def transform_to_vehicle_frame(
frame: Dict[str, tf.Tensor]
) -> Dict[str, tf.Tensor]:
"""Transform tensors in a frame from global coordinates to vehicle
coordinates.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame in
global frame.
Returns:
A dictionary of feature tensors in vehicle frame.
"""
assert_waymo_open_dataset_installed(
"keras_cv.datasets.waymo.transform_to_vehicle_frame()"
)
def _transform_to_vehicle_frame(
point_global_xyz: tf.Tensor,
point_mask: tf.Tensor,
box_global: tf.Tensor,
box_mask: tf.Tensor,
sdc_pose: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor]:
point_vehicle_xyz = _point_global_to_vehicle(point_global_xyz, sdc_pose)
point_vehicle_xyz = tf.where(
point_mask[..., tf.newaxis], point_vehicle_xyz, 0.0
)
box_vehicle = _box_3d_global_to_vehicle(box_global, sdc_pose)
box_vehicle = tf.where(box_mask[..., tf.newaxis], box_vehicle, 0.0)
return point_vehicle_xyz, box_vehicle
point_vehicle_xyz, box_vehicle = _transform_to_vehicle_frame(
frame["point_xyz"],
frame["point_mask"],
frame["label_box"],
frame["label_box_mask"],
frame["pose"],
)
frame["point_xyz"] = point_vehicle_xyz
frame["label_box"] = box_vehicle
# Override pose as the points and boxes are in the vehicle frame.
frame["pose"] = tf.eye(4)
if frame["label_point_nlz"] is not None:
frame["point_mask"] = tf.logical_and(
frame["point_mask"],
tf.logical_not(tf.cast(frame["label_point_nlz"], tf.bool)),
)
return frame
@keras_cv_export("keras_cv.datasets.waymo.convert_to_center_pillar_inputs")
def convert_to_center_pillar_inputs(
frame: Dict[str, tf.Tensor]
) -> Dict[str, Any]:
"""Converts an input frame into CenterPillar input format.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame
Returns:
A dictionary of two tensor dictionaries with keys "point_clouds"
and "3d_boxes".
"""
point_clouds = {
"point_xyz": frame["point_xyz"],
"point_feature": frame["point_feature"],
"point_mask": frame["point_mask"],
}
boxes = {
"boxes": frame["label_box"],
"classes": frame["label_box_class"],
"difficulty": frame["label_box_detection_difficulty"],
"mask": frame["label_box_mask"],
}
y = {
"point_clouds": point_clouds,
"3d_boxes": boxes,
}
return y
@keras_cv_export("keras_cv.datasets.waymo.build_tensors_for_augmentation")
def build_tensors_for_augmentation(
frame: Dict[str, tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Builds tensors for data augmentation from an input frame.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame
Returns:
A dictionary of two tensors with keys "point_clouds" and "bounding_boxes",
whose values are tensors of shapes [num points, num features] and
[num boxes, num features], respectively.
"""
assert_waymo_open_dataset_installed(
"keras_cv.datasets.waymo.build_tensors_for_augmentation()"
)
point_cloud = tf.concat(
[
frame["point_xyz"][tf.newaxis, ...],
frame["point_feature"][tf.newaxis, ...],
tf.cast(frame["point_mask"], tf.float32)[tf.newaxis, :, tf.newaxis],
],
axis=-1,
)
boxes = tf.concat(
[
frame["label_box"][tf.newaxis, :],
tf.cast(frame["label_box_class"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
tf.cast(frame["label_box_mask"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
tf.cast(frame["label_box_density"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
tf.cast(frame["label_box_detection_difficulty"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
],
axis=-1,
)
return {
"point_clouds": tf.squeeze(point_cloud, axis=0),
"bounding_boxes": tf.squeeze(boxes, axis=0),
}
# --- End of file: keras-cv/keras_cv/datasets/waymo/transformer.py (repo: keras-cv) ---
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
BN_AXIS = 3
CONV_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {
"scale": 2.0,
"mode": "fan_out",
"distribution": "truncated_normal",
},
}
@keras_cv_export("keras_cv.layers.MBConvBlock")
class MBConvBlock(keras.layers.Layer):
def __init__(
self,
input_filters: int,
output_filters: int,
expand_ratio=1,
kernel_size=3,
strides=1,
se_ratio=0.0,
bn_momentum=0.9,
activation="swish",
survival_probability: float = 0.8,
**kwargs
):
"""
Implementation of the MBConv block (Mobile Inverted Residual Bottleneck)
from:
[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381v4).
MBConv blocks are common blocks used in mobile-oriented and efficient
architectures, present in architectures such as MobileNet, EfficientNet,
MaxViT, etc.
MBConv blocks follow a narrow-wide-narrow structure: expanding with a 1x1
convolution, applying a depthwise convolution, and projecting back down
with a 1x1 convolution, which is more efficient than conventional
wide-narrow-wide structures.
As they're frequently used for models to be deployed to edge devices,
they're implemented as a layer for ease of use and re-use.
Args:
input_filters: int, the number of input filters
output_filters: int, the optional number of output filters after
Squeeze-Excitation
expand_ratio: default 1, the ratio by which input_filters are
multiplied to expand the structure in the middle expansion phase
kernel_size: default 3, the kernel_size to apply to the expansion
phase convolutions
strides: default 1, the strides to apply to the expansion phase
convolutions
se_ratio: default 0.0, Squeeze-and-Excitation is applied after the
depthwise convolution and before the output convolution, and only if
se_ratio is above 0. The number of filters used in this phase is the
maximum of 1 and input_filters*se_ratio
bn_momentum: default 0.9, the BatchNormalization momentum
activation: default "swish", the activation function used between
convolution operations
survival_probability: float, the optional dropout rate to apply
before the residual addition, defaults to 0.8
Returns:
A `tf.Tensor` representing a feature map, passed through the MBConv
block
Example usage:
```
inputs = tf.random.normal(shape=(1, 64, 64, 32), dtype=tf.float32)
layer = keras_cv.layers.MBConvBlock(input_filters=32, output_filters=32)
output = layer(inputs)
output.shape # TensorShape([1, 64, 64, 32])
```
""" # noqa: E501
super().__init__(**kwargs)
self.input_filters = input_filters
self.output_filters = output_filters
self.expand_ratio = expand_ratio
self.kernel_size = kernel_size
self.strides = strides
self.se_ratio = se_ratio
self.bn_momentum = bn_momentum
self.activation = activation
self.survival_probability = survival_probability
self.filters = self.input_filters * self.expand_ratio
self.filters_se = max(1, int(input_filters * se_ratio))
self.conv1 = keras.layers.Conv2D(
filters=self.filters,
kernel_size=1,
strides=1,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format="channels_last",
use_bias=False,
name=self.name + "expand_conv",
)
self.bn1 = keras.layers.BatchNormalization(
axis=BN_AXIS,
momentum=self.bn_momentum,
name=self.name + "expand_bn",
)
self.act = keras.layers.Activation(
self.activation, name=self.name + "activation"
)
self.depthwise = keras.layers.DepthwiseConv2D(
kernel_size=self.kernel_size,
strides=self.strides,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format="channels_last",
use_bias=False,
name=self.name + "dwconv2",
)
self.bn2 = keras.layers.BatchNormalization(
axis=BN_AXIS, momentum=self.bn_momentum, name=self.name + "bn"
)
self.se_conv1 = keras.layers.Conv2D(
self.filters_se,
1,
padding="same",
activation=self.activation,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=self.name + "se_reduce",
)
self.se_conv2 = keras.layers.Conv2D(
self.filters,
1,
padding="same",
activation="sigmoid",
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=self.name + "se_expand",
)
self.output_conv = keras.layers.Conv2D(
filters=self.output_filters,
kernel_size=1 if expand_ratio != 1 else kernel_size,
strides=1,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format="channels_last",
use_bias=False,
name=self.name + "project_conv",
)
self.bn3 = keras.layers.BatchNormalization(
axis=BN_AXIS,
momentum=self.bn_momentum,
name=self.name + "project_bn",
)
if self.survival_probability:
self.dropout = keras.layers.Dropout(
self.survival_probability,
noise_shape=(None, 1, 1, 1),
name=self.name + "drop",
)
def build(self, input_shape):
if self.name is None:
self.name = keras.backend.get_uid("block0")
def call(self, inputs):
# Expansion phase
if self.expand_ratio != 1:
x = self.conv1(inputs)
x = self.bn1(x)
x = self.act(x)
else:
x = inputs
# Depthwise conv
x = self.depthwise(x)
x = self.bn2(x)
x = self.act(x)
# Squeeze and excite
if 0 < self.se_ratio <= 1:
se = keras.layers.GlobalAveragePooling2D(
name=self.name + "se_squeeze"
)(x)
if BN_AXIS == 1:
se_shape = (self.filters, 1, 1)
else:
se_shape = (1, 1, self.filters)
se = keras.layers.Reshape(se_shape, name=self.name + "se_reshape")(
se
)
se = self.se_conv1(se)
se = self.se_conv2(se)
x = keras.layers.multiply([x, se], name=self.name + "se_excite")
# Output phase
x = self.output_conv(x)
x = self.bn3(x)
if self.strides == 1 and self.input_filters == self.output_filters:
if self.survival_probability:
x = self.dropout(x)
x = keras.layers.Add(name=self.name + "add")([x, inputs])
return x
def get_config(self):
config = {
"input_filters": self.input_filters,
"output_filters": self.output_filters,
"expand_ratio": self.expand_ratio,
"kernel_size": self.kernel_size,
"strides": self.strides,
"se_ratio": self.se_ratio,
"bn_momentum": self.bn_momentum,
"activation": self.activation,
"survival_probability": self.survival_probability,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/mbconv.py/0 | {
"file_path": "keras-cv/keras_cv/layers/mbconv.py",
"repo_id": "keras-cv",
"token_count": 4147
} | 7 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.backend import assert_tf_keras
from keras_cv.bounding_box import iou
from keras_cv.layers.object_detection import box_matcher
from keras_cv.layers.object_detection import sampling
from keras_cv.utils import target_gather
@keras.utils.register_keras_serializable(package="keras_cv")
class _ROISampler(keras.layers.Layer):
"""
Sample ROIs for loss related calculation.
With proposals (ROIs) and ground truth, it performs the following:
1) compute IOU similarity matrix
2) match each proposal to ground truth box based on IOU
    3) sample positive matches and negative matches, and return the sampled
       proposals together with their matched ground truth targets
`append_gt_boxes` augments proposals with ground truth boxes. This is
useful in 2 stage detection networks during initialization where the
    1st stage often cannot produce good proposals for the 2nd stage. Setting it to
True will allow it to generate more reasonable proposals at the beginning.
    `background_class` allows users to set the label for background proposals.
    It defaults to 0, so users need to manually shift the incoming `gt_classes`
if its range is [0, num_classes).
Args:
bounding_box_format: The format of bounding boxes to generate. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
roi_matcher: a `BoxMatcher` object that matches proposals with ground
            truth boxes. A positive match must be 1 and a negative match must
            be -1. This assumption is not validated here.
positive_fraction: the positive ratio w.r.t `num_sampled_rois`, defaults
to 0.25.
        background_class: the background class value assigned to the returned
            sampled ground truth that is classified as background.
num_sampled_rois: the number of sampled proposals per image for
further (loss) calculation, defaults to 256.
        append_gt_boxes: boolean, whether gt_boxes will be appended to rois
            before sampling the rois, defaults to True.
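    Example usage (an illustrative sketch only; the matcher thresholds, box
    values and shapes below are arbitrary placeholders, not recommended
    settings):
    ```python
    matcher = box_matcher.BoxMatcher(
        thresholds=[0.3, 0.7], match_values=[-1, 0, 1]
    )
    sampler = _ROISampler(
        bounding_box_format="xyxy",
        roi_matcher=matcher,
        num_sampled_rois=64,
    )
    rois = tf.random.uniform((2, 128, 4))
    gt_boxes = tf.random.uniform((2, 8, 4))
    gt_classes = tf.cast(
        tf.random.uniform((2, 8, 1), maxval=5, dtype=tf.int32), tf.float32
    )
    # Returns a 5-tuple: (sampled_rois, sampled_gt_boxes, sampled_box_weights,
    # sampled_gt_classes, sampled_class_weights).
    outputs = sampler(rois, gt_boxes, gt_classes)
    ```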
""" # noqa: E501
def __init__(
self,
bounding_box_format: str,
roi_matcher: box_matcher.BoxMatcher,
positive_fraction: float = 0.25,
background_class: int = 0,
num_sampled_rois: int = 256,
append_gt_boxes: bool = True,
**kwargs,
):
assert_tf_keras("keras_cv.layers._ROISampler")
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.roi_matcher = roi_matcher
self.positive_fraction = positive_fraction
self.background_class = background_class
self.num_sampled_rois = num_sampled_rois
self.append_gt_boxes = append_gt_boxes
self.built = True
# for debugging.
self._positives = keras.metrics.Mean()
self._negatives = keras.metrics.Mean()
def call(
self,
rois: tf.Tensor,
gt_boxes: tf.Tensor,
gt_classes: tf.Tensor,
):
"""
Args:
rois: [batch_size, num_rois, 4]
gt_boxes: [batch_size, num_gt, 4]
gt_classes: [batch_size, num_gt, 1]
Returns:
sampled_rois: [batch_size, num_sampled_rois, 4]
sampled_gt_boxes: [batch_size, num_sampled_rois, 4]
sampled_box_weights: [batch_size, num_sampled_rois, 1]
sampled_gt_classes: [batch_size, num_sampled_rois, 1]
sampled_class_weights: [batch_size, num_sampled_rois, 1]
"""
if self.append_gt_boxes:
# num_rois += num_gt
rois = tf.concat([rois, gt_boxes], axis=1)
num_rois = rois.get_shape().as_list()[1]
if num_rois is None:
raise ValueError(
f"`rois` must have static shape, got {rois.get_shape()}"
)
if num_rois < self.num_sampled_rois:
raise ValueError(
"num_rois must be less than `num_sampled_rois` "
f"({self.num_sampled_rois}), got {num_rois}"
)
rois = bounding_box.convert_format(
rois, source=self.bounding_box_format, target="yxyx"
)
gt_boxes = bounding_box.convert_format(
gt_boxes, source=self.bounding_box_format, target="yxyx"
)
# [batch_size, num_rois, num_gt]
similarity_mat = iou.compute_iou(
rois, gt_boxes, bounding_box_format="yxyx", use_masking=True
)
# [batch_size, num_rois] | [batch_size, num_rois]
matched_gt_cols, matched_vals = self.roi_matcher(similarity_mat)
# [batch_size, num_rois]
positive_matches = tf.math.equal(matched_vals, 1)
negative_matches = tf.math.equal(matched_vals, -1)
self._positives.update_state(
tf.reduce_sum(tf.cast(positive_matches, tf.float32), axis=-1)
)
self._negatives.update_state(
tf.reduce_sum(tf.cast(negative_matches, tf.float32), axis=-1)
)
# [batch_size, num_rois, 1]
background_mask = tf.expand_dims(
tf.logical_not(positive_matches), axis=-1
)
# [batch_size, num_rois, 1]
matched_gt_classes = target_gather._target_gather(
gt_classes, matched_gt_cols
)
# also set all background matches to `background_class`
matched_gt_classes = tf.where(
background_mask,
tf.cast(
self.background_class * tf.ones_like(matched_gt_classes),
gt_classes.dtype,
),
matched_gt_classes,
)
# [batch_size, num_rois, 4]
matched_gt_boxes = target_gather._target_gather(
gt_boxes, matched_gt_cols
)
encoded_matched_gt_boxes = bounding_box._encode_box_to_deltas(
anchors=rois,
boxes=matched_gt_boxes,
anchor_format="yxyx",
box_format="yxyx",
variance=[0.1, 0.1, 0.2, 0.2],
)
# also set all background matches to 0 coordinates
encoded_matched_gt_boxes = tf.where(
background_mask,
tf.zeros_like(matched_gt_boxes),
encoded_matched_gt_boxes,
)
# [batch_size, num_rois]
sampled_indicators = sampling.balanced_sample(
positive_matches,
negative_matches,
self.num_sampled_rois,
self.positive_fraction,
)
# [batch_size, num_sampled_rois] in the range of [0, num_rois)
sampled_indicators, sampled_indices = tf.math.top_k(
sampled_indicators, k=self.num_sampled_rois, sorted=True
)
# [batch_size, num_sampled_rois, 4]
sampled_rois = target_gather._target_gather(rois, sampled_indices)
# [batch_size, num_sampled_rois, 4]
sampled_gt_boxes = target_gather._target_gather(
encoded_matched_gt_boxes, sampled_indices
)
# [batch_size, num_sampled_rois, 1]
sampled_gt_classes = target_gather._target_gather(
matched_gt_classes, sampled_indices
)
# [batch_size, num_sampled_rois, 1]
# all negative samples will be ignored in regression
sampled_box_weights = target_gather._target_gather(
tf.cast(positive_matches[..., tf.newaxis], gt_boxes.dtype),
sampled_indices,
)
# [batch_size, num_sampled_rois, 1]
sampled_indicators = sampled_indicators[..., tf.newaxis]
sampled_class_weights = tf.cast(sampled_indicators, gt_classes.dtype)
return (
sampled_rois,
sampled_gt_boxes,
sampled_box_weights,
sampled_gt_classes,
sampled_class_weights,
)
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"positive_fraction": self.positive_fraction,
"background_class": self.background_class,
"num_sampled_rois": self.num_sampled_rois,
"append_gt_boxes": self.append_gt_boxes,
"roi_matcher": self.roi_matcher.get_config(),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
roi_matcher_config = config.pop("roi_matcher")
roi_matcher = box_matcher.BoxMatcher(**roi_matcher_config)
return cls(roi_matcher=roi_matcher, **config)
| keras-cv/keras_cv/layers/object_detection/roi_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_sampler.py",
"repo_id": "keras-cv",
"token_count": 4104
} | 8 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Also export the image KPLs from core keras, so that users can import all the
# image KPLs from one place.
from tensorflow.keras.layers import CenterCrop
from tensorflow.keras.layers import RandomHeight
from tensorflow.keras.layers import RandomWidth
from keras_cv.layers.preprocessing.aug_mix import AugMix
from keras_cv.layers.preprocessing.auto_contrast import AutoContrast
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.channel_shuffle import ChannelShuffle
from keras_cv.layers.preprocessing.cut_mix import CutMix
from keras_cv.layers.preprocessing.equalization import Equalization
from keras_cv.layers.preprocessing.fourier_mix import FourierMix
from keras_cv.layers.preprocessing.grayscale import Grayscale
from keras_cv.layers.preprocessing.grid_mask import GridMask
from keras_cv.layers.preprocessing.jittered_resize import JitteredResize
from keras_cv.layers.preprocessing.mix_up import MixUp
from keras_cv.layers.preprocessing.mosaic import Mosaic
from keras_cv.layers.preprocessing.posterization import Posterization
from keras_cv.layers.preprocessing.rand_augment import RandAugment
from keras_cv.layers.preprocessing.random_apply import RandomApply
from keras_cv.layers.preprocessing.random_aspect_ratio import RandomAspectRatio
from keras_cv.layers.preprocessing.random_augmentation_pipeline import (
RandomAugmentationPipeline,
)
from keras_cv.layers.preprocessing.random_brightness import RandomBrightness
from keras_cv.layers.preprocessing.random_channel_shift import (
RandomChannelShift,
)
from keras_cv.layers.preprocessing.random_choice import RandomChoice
from keras_cv.layers.preprocessing.random_color_degeneration import (
RandomColorDegeneration,
)
from keras_cv.layers.preprocessing.random_color_jitter import RandomColorJitter
from keras_cv.layers.preprocessing.random_contrast import RandomContrast
from keras_cv.layers.preprocessing.random_crop import RandomCrop
from keras_cv.layers.preprocessing.random_crop_and_resize import (
RandomCropAndResize,
)
from keras_cv.layers.preprocessing.random_cutout import RandomCutout
from keras_cv.layers.preprocessing.random_flip import RandomFlip
from keras_cv.layers.preprocessing.random_gaussian_blur import (
RandomGaussianBlur,
)
from keras_cv.layers.preprocessing.random_hue import RandomHue
from keras_cv.layers.preprocessing.random_jpeg_quality import RandomJpegQuality
from keras_cv.layers.preprocessing.random_rotation import RandomRotation
from keras_cv.layers.preprocessing.random_saturation import RandomSaturation
from keras_cv.layers.preprocessing.random_sharpness import RandomSharpness
from keras_cv.layers.preprocessing.random_shear import RandomShear
from keras_cv.layers.preprocessing.random_translation import RandomTranslation
from keras_cv.layers.preprocessing.random_zoom import RandomZoom
from keras_cv.layers.preprocessing.repeated_augmentation import (
RepeatedAugmentation,
)
from keras_cv.layers.preprocessing.rescaling import Rescaling
from keras_cv.layers.preprocessing.resizing import Resizing
from keras_cv.layers.preprocessing.solarization import Solarization
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
| keras-cv/keras_cv/layers/preprocessing/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/__init__.py",
"repo_id": "keras-cv",
"token_count": 1195
} | 9 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class GrayscaleTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 52, 24, 3))
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, (2, 52, 24, 1))
self.assertEqual(xs2.shape, (2, 52, 24, 3))
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack([2 * tf.ones((10, 10, 3)), tf.ones((10, 10, 3))], axis=0),
tf.float32,
)
# test 1
layer = preprocessing.Grayscale(
output_channels=1,
)
@tf.function
def augment(x):
return layer(x, training=True)
xs1 = augment(xs)
# test 2
layer = preprocessing.Grayscale(
output_channels=3,
)
@tf.function
def augment(x):
return layer(x, training=True)
xs2 = augment(xs)
self.assertEqual(xs1.shape, (2, 10, 10, 1))
self.assertEqual(xs2.shape, (2, 10, 10, 3))
def test_non_square_image(self):
xs = tf.cast(
tf.stack([2 * tf.ones((52, 24, 3)), tf.ones((52, 24, 3))], axis=0),
tf.float32,
)
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, (2, 52, 24, 1))
self.assertEqual(xs2.shape, (2, 52, 24, 3))
def test_in_single_image(self):
xs = tf.cast(
tf.ones((52, 24, 3)),
dtype=tf.float32,
)
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, (52, 24, 1))
self.assertEqual(xs2.shape, (52, 24, 3))
| keras-cv/keras_cv/layers/preprocessing/grayscale_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/grayscale_test.py",
"repo_id": "keras-cv",
"token_count": 1376
} | 10 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import get_interpolation
from keras_cv.utils import parse_factor
@keras_cv_export("keras_cv.layers.RandomAspectRatio")
class RandomAspectRatio(BaseImageAugmentationLayer):
"""RandomAspectRatio randomly distorts the aspect ratio of the provided
image.
This is done on an element-wise basis, and as a consequence this layer
always returns a tf.RaggedTensor.
Args:
        factor: a range of values in the range `(0, infinity)` that determines
            the factor by which to distort the aspect ratio of each image.
interpolation: interpolation method used in the `Resize` op.
Supported values are `"nearest"` and `"bilinear"`.
Defaults to `"bilinear"`.
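    Usage (an illustrative sketch; the factor range below is an arbitrary
    choice, not a recommended setting):
    ```python
    images = tf.random.uniform(shape=(8, 64, 64, 3))
    layer = keras_cv.layers.RandomAspectRatio(factor=(0.9, 1.1))
    # The output is a tf.RaggedTensor because each image may end up with a
    # different height and width.
    outputs = layer(images)
    ```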
"""
def __init__(
self,
factor,
interpolation="bilinear",
bounding_box_format=None,
seed=None,
**kwargs
):
super().__init__(**kwargs)
self.interpolation = get_interpolation(interpolation)
self.factor = parse_factor(
factor,
min_value=0.0,
max_value=None,
seed=seed,
param_name="factor",
)
self.bounding_box_format = bounding_box_format
self.seed = seed
self.auto_vectorize = False
self.force_output_ragged_images = True
def get_random_transformation(self, **kwargs):
return self.factor(dtype=self.compute_dtype)
def compute_image_signature(self, images):
return tf.RaggedTensorSpec(
shape=(None, None, images.shape[-1]),
ragged_rank=1,
dtype=self.compute_dtype,
)
def augment_bounding_boxes(
self, bounding_boxes, transformation, image, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"Please provide a `bounding_box_format` when augmenting "
"bounding boxes with `RandomAspectRatio()`."
)
bounding_boxes = bounding_boxes.copy()
img_shape = tf.shape(image)
img_shape = tf.cast(img_shape, self.compute_dtype)
height, width = img_shape[0], img_shape[1]
height = height / transformation
width = width * transformation
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
image_shape=img_shape,
)
x, y, x2, y2 = tf.split(bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1)
x = x * transformation
x2 = x2 * transformation
y = y / transformation
y2 = y2 / transformation
boxes = tf.concat([x, y, x2, y2], axis=-1)
boxes = bounding_box.convert_format(
boxes,
source="xyxy",
target=self.bounding_box_format,
image_shape=tf.stack([height, width, 3], axis=0),
)
bounding_boxes["boxes"] = boxes
return bounding_boxes
def augment_image(self, image, transformation, **kwargs):
        # Resize the image according to the sampled aspect ratio
        # transformation: height is divided by it, width is multiplied by it.
img_shape = tf.cast(tf.shape(image), self.compute_dtype)
height, width = img_shape[0], img_shape[1]
height = height / transformation
width = width * transformation
target_size = tf.cast(tf.stack([height, width]), tf.int32)
result = tf.image.resize(
image, size=target_size, method=self.interpolation
)
return tf.cast(result, self.compute_dtype)
def augment_label(self, label, transformation, **kwargs):
return label
def get_config(self):
config = {
"factor": self.factor,
"interpolation": self.interpolation,
"bounding_box_format": self.bounding_box_format,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/random_aspect_ratio.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_aspect_ratio.py",
"repo_id": "keras-cv",
"token_count": 2007
} | 11 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv import layers as cv_layers
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axes are reverse indexed
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.RandomCrop")
class RandomCrop(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly crops images.
This layer will randomly choose a location to crop images down to a target
size.
If an input image is smaller than the target size, the input will be
resized and cropped to return the largest possible window in the image that
matches the target aspect ratio.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
seed: Integer. Used to create a random seed.
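    Usage (an illustrative sketch; the input and crop sizes are arbitrary):
    ```python
    images = tf.random.uniform(shape=(2, 224, 224, 3))
    random_crop = keras_cv.layers.RandomCrop(height=128, width=128)
    augmented_images = random_crop(images)  # shape: (2, 128, 128, 3)
    ```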
"""
def __init__(
self, height, width, seed=None, bounding_box_format=None, **kwargs
):
super().__init__(
**kwargs,
autocast=False,
seed=seed,
)
self.height = height
self.width = width
self.bounding_box_format = bounding_box_format
self.seed = seed
self.force_output_dense_images = True
def compute_ragged_image_signature(self, images):
ragged_spec = tf.RaggedTensorSpec(
shape=(self.height, self.width, images.shape[-1]),
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
def get_random_transformation_batch(self, batch_size, **kwargs):
tops = tf.cast(
self._random_generator.uniform(
shape=(batch_size, 1), minval=0, maxval=1
),
self.compute_dtype,
)
lefts = tf.cast(
self._random_generator.uniform(
shape=(batch_size, 1), minval=0, maxval=1
),
self.compute_dtype,
)
return {"tops": tops, "lefts": lefts}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
tops = transformation["tops"]
lefts = transformation["lefts"]
transformation = {
"tops": tf.expand_dims(tops, axis=0),
"lefts": tf.expand_dims(lefts, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
batch_size = tf.shape(images)[0]
channel = tf.shape(images)[-1]
heights, widths = self._get_image_shape(images)
h_diffs = heights - self.height
w_diffs = widths - self.width
# broadcast
h_diffs = (
tf.ones(
shape=(batch_size, self.height, self.width, channel),
dtype=tf.int32,
)
* h_diffs[:, tf.newaxis, tf.newaxis, :]
)
w_diffs = (
tf.ones(
shape=(batch_size, self.height, self.width, channel),
dtype=tf.int32,
)
* w_diffs[:, tf.newaxis, tf.newaxis, :]
)
return tf.where(
tf.math.logical_and(h_diffs >= 0, w_diffs >= 0),
self._crop_images(images, transformations),
self._resize_images(images),
)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_bounding_boxes(
self, bounding_boxes, transformations, raw_images=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomCrop()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomCrop(bounding_box_format='xyxy')`"
)
if isinstance(bounding_boxes["boxes"], tf.RaggedTensor):
bounding_boxes = bounding_box.to_dense(
bounding_boxes, default_value=-1
)
batch_size = tf.shape(raw_images)[0]
heights, widths = self._get_image_shape(raw_images)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
images=raw_images,
)
h_diffs = heights - self.height
w_diffs = widths - self.width
# broadcast
num_bounding_boxes = tf.shape(bounding_boxes["boxes"])[-2]
h_diffs = (
tf.ones(
shape=(batch_size, num_bounding_boxes, 4),
dtype=tf.int32,
)
* h_diffs[:, tf.newaxis, :]
)
w_diffs = (
tf.ones(
shape=(batch_size, num_bounding_boxes, 4),
dtype=tf.int32,
)
* w_diffs[:, tf.newaxis, :]
)
boxes = tf.where(
tf.math.logical_and(h_diffs >= 0, w_diffs >= 0),
self._crop_bounding_boxes(
raw_images, bounding_boxes["boxes"], transformations
),
self._resize_bounding_boxes(
raw_images,
bounding_boxes["boxes"],
),
)
bounding_boxes["boxes"] = boxes
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="xyxy",
image_shape=(self.height, self.width, None),
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
image_shape=(self.height, self.width, None),
)
return bounding_boxes
def _get_image_shape(self, images):
if isinstance(images, tf.RaggedTensor):
heights = tf.reshape(images.row_lengths(), (-1, 1))
widths = tf.reshape(
tf.reduce_max(images.row_lengths(axis=2), 1), (-1, 1)
)
else:
batch_size = tf.shape(images)[0]
heights = tf.repeat(tf.shape(images)[H_AXIS], repeats=[batch_size])
heights = tf.reshape(heights, shape=(-1, 1))
widths = tf.repeat(tf.shape(images)[W_AXIS], repeats=[batch_size])
widths = tf.reshape(widths, shape=(-1, 1))
return tf.cast(heights, dtype=tf.int32), tf.cast(widths, dtype=tf.int32)
def _crop_images(self, images, transformations):
batch_size = tf.shape(images)[0]
heights, widths = self._get_image_shape(images)
heights = tf.cast(heights, dtype=self.compute_dtype)
widths = tf.cast(widths, dtype=self.compute_dtype)
tops = transformations["tops"]
lefts = transformations["lefts"]
x1s = lefts * (widths - self.width)
y1s = tops * (heights - self.height)
x2s = x1s + self.width
y2s = y1s + self.height
# normalize
x1s /= widths
y1s /= heights
x2s /= widths
y2s /= heights
boxes = tf.concat([y1s, x1s, y2s, x2s], axis=-1)
images = tf.image.crop_and_resize(
tf.cast(images, tf.float32),
tf.cast(boxes, tf.float32),
tf.range(batch_size),
[self.height, self.width],
method="nearest",
)
return tf.cast(images, dtype=self.compute_dtype)
def _resize_images(self, images):
resizing_layer = cv_layers.Resizing(self.height, self.width)
outputs = resizing_layer(images)
return tf.cast(outputs, dtype=self.compute_dtype)
def _crop_bounding_boxes(self, images, boxes, transformation):
tops = transformation["tops"]
lefts = transformation["lefts"]
heights, widths = self._get_image_shape(images)
heights = tf.cast(heights, dtype=self.compute_dtype)
widths = tf.cast(widths, dtype=self.compute_dtype)
# compute offsets for xyxy bounding_boxes
top_offsets = tf.cast(
tf.math.round(tops * (heights - self.height)),
dtype=self.compute_dtype,
)
left_offsets = tf.cast(
tf.math.round(lefts * (widths - self.width)),
dtype=self.compute_dtype,
)
x1s, y1s, x2s, y2s = tf.split(
tf.cast(boxes, self.compute_dtype), 4, axis=-1
)
x1s -= tf.expand_dims(left_offsets, axis=1)
y1s -= tf.expand_dims(top_offsets, axis=1)
x2s -= tf.expand_dims(left_offsets, axis=1)
y2s -= tf.expand_dims(top_offsets, axis=1)
outputs = tf.concat([x1s, y1s, x2s, y2s], axis=-1)
return outputs
def _resize_bounding_boxes(self, images, boxes):
heights, widths = self._get_image_shape(images)
heights = tf.cast(heights, dtype=self.compute_dtype)
widths = tf.cast(widths, dtype=self.compute_dtype)
x_scale = tf.cast(self.width / widths, dtype=self.compute_dtype)
y_scale = tf.cast(self.height / heights, dtype=self.compute_dtype)
x1s, y1s, x2s, y2s = tf.split(
tf.cast(boxes, self.compute_dtype), 4, axis=-1
)
outputs = tf.concat(
[
x1s * x_scale[:, tf.newaxis, :],
y1s * y_scale[:, tf.newaxis, :],
x2s * x_scale[:, tf.newaxis, :],
y2s * y_scale[:, tf.newaxis, :],
],
axis=-1,
)
return outputs
def get_config(self):
config = {
"height": self.height,
"width": self.width,
"seed": self.seed,
"bounding_box_format": self.bounding_box_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_crop.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_crop.py",
"repo_id": "keras-cv",
"token_count": 5385
} | 12 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandomSaturation")
class RandomSaturation(VectorizedBaseImageAugmentationLayer):
"""Randomly adjusts the saturation on given images.
This layer will randomly increase/reduce the saturation for the input RGB
images.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image saturation is impacted. `factor=0.5` makes this layer perform
            a no-op operation. `factor=0.0` makes the image fully grayscale.
            `factor=1.0` makes the image fully saturated.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_saturation = keras_cv.layers.preprocessing.RandomSaturation()
augmented_images = random_saturation(images)
```
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
min_value=0.0,
max_value=1.0,
)
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
return self.factor(shape=(batch_size,))
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
images=image, transformations=transformation, **kwargs
)
def augment_images(self, images, transformations, **kwargs):
# Convert the factor range from [0, 1] to [0, +inf]. Note that the
# tf.image.adjust_saturation is trying to apply the following math
# formula `output_saturation = input_saturation * factor`. We use the
        # following method to do the mapping.
# `y = x / (1 - x)`.
# This will ensure:
# y = +inf when x = 1 (full saturation)
# y = 1 when x = 0.5 (no augmentation)
# y = 0 when x = 0 (full gray scale)
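        # For example, a sampled factor of 0.75 maps to 0.75 / (1 - 0.75) = 3,
        # i.e. the saturation channel is tripled (illustrative value only).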
        # Convert the transformation to a tensor in case it is a float. When
        # the transformation is 1.0, a Python float would trigger a
        # divide-by-zero error, but a tensor handles it correctly (as inf).
transformations = tf.convert_to_tensor(transformations)
adjust_factors = transformations / (1 - transformations)
adjust_factors = tf.cast(adjust_factors, dtype=images.dtype)
images = tf.image.rgb_to_hsv(images)
s_channel = tf.multiply(
images[..., 1], adjust_factors[..., tf.newaxis, tf.newaxis]
)
s_channel = tf.clip_by_value(
s_channel, clip_value_min=0.0, clip_value_max=1.0
)
images = tf.stack([images[..., 0], s_channel, images[..., 2]], axis=-1)
images = tf.image.hsv_to_rgb(images)
return images
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_saturation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_saturation.py",
"repo_id": "keras-cv",
"token_count": 1942
} | 13 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.Solarization")
class Solarization(VectorizedBaseImageAugmentationLayer):
"""Applies (max_value - pixel + min_value) for each pixel in the image.
    When created without the `threshold_factor` parameter, the layer applies
    solarization to all values. When created with a specified
    `threshold_factor`, the layer only augments pixels that are above the
    threshold value.
Reference:
- [AutoAugment: Learning Augmentation Policies from Data](
https://arxiv.org/abs/1805.09501
)
- [RandAugment](https://arxiv.org/pdf/1909.13719.pdf)
Args:
value_range: a tuple or a list of two elements. The first value
represents the lower bound for values in passed images, the second
represents the upper bound. Images passed to the layer should have
values within `value_range`.
addition_factor: (Optional) A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, addition_factor)`. If specified, this value is
added to each pixel before solarization and thresholding. The
addition value should be scaled according to the value range
(0, 255), defaults to 0.0.
threshold_factor: (Optional) A tuple of two floats, a single float or
a `keras_cv.FactorSampler`. For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, threshold_factor)`. If specified, only pixel
values above this threshold will be solarized.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
print(images[0, 0, 0])
# [59 62 63]
# Note that images are Tensor with values in the range [0, 255]
solarization = Solarization(value_range=(0, 255))
images = solarization(images)
print(images[0, 0, 0])
# [196, 193, 192]
```
Call arguments:
images: Tensor of type int or float, with pixels in
range [0, 255] and shape [batch, height, width, channels]
or [height, width, channels].
"""
def __init__(
self,
value_range,
addition_factor=0.0,
threshold_factor=0.0,
seed=None,
**kwargs
):
super().__init__(seed=seed, **kwargs)
self.seed = seed
self.addition_factor = preprocessing.parse_factor(
addition_factor,
max_value=255,
seed=seed,
param_name="addition_factor",
)
self.threshold_factor = preprocessing.parse_factor(
threshold_factor,
max_value=255,
seed=seed,
param_name="threshold_factor",
)
self.value_range = value_range
def get_random_transformation_batch(self, batch_size, **kwargs):
return {
"additions": self.addition_factor(
shape=(batch_size, 1, 1, 1), dtype=self.compute_dtype
),
"thresholds": self.threshold_factor(
shape=(batch_size, 1, 1, 1), dtype=self.compute_dtype
),
}
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(image, transformation)
def augment_images(self, images, transformations, **kwargs):
thresholds = transformations["thresholds"]
additions = transformations["additions"]
images = preprocessing.transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
results = images + additions
results = tf.clip_by_value(results, 0, 255)
results = tf.where(results < thresholds, results, 255 - results)
results = preprocessing.transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return results
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def get_config(self):
config = {
"threshold_factor": self.threshold_factor,
"addition_factor": self.addition_factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["threshold_factor"], dict):
config["threshold_factor"] = keras.utils.deserialize_keras_object(
config["threshold_factor"]
)
if isinstance(config["addition_factor"], dict):
config["addition_factor"] = keras.utils.deserialize_keras_object(
config["addition_factor"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/solarization.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/solarization.py",
"repo_id": "keras-cv",
"token_count": 2553
} | 14 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from tensorflow import keras
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.frustum_random_point_feature_noise import ( # noqa: E501
FrustumRandomPointFeatureNoise,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
POINTCLOUD_LABEL_INDEX = base_augmentation_layer_3d.POINTCLOUD_LABEL_INDEX
class FrustumRandomPointFeatureNoiseTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0, theta_width=1, phi_width=1, max_noise_level=0.5
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
# bounding boxes and point clouds (x, y, z, class) are not modified.
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
self.assertAllClose(
inputs[POINT_CLOUDS][:, :, :POINTCLOUD_LABEL_INDEX],
outputs[POINT_CLOUDS][:, :, :POINTCLOUD_LABEL_INDEX],
)
def test_augment_specific_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = FrustumRandomPointFeatureNoise(
r_distance=10,
theta_width=np.pi,
phi_width=1.5 * np.pi,
max_noise_level=0.5,
)
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4, 5],
[10, 1, 2, 3, 4, 2],
[100, 100, 2, 3, 4, 1],
[-20, -20, 21, 1, 0, 2],
]
]
* 2
).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
# bounding boxes and point clouds (x, y, z, class) are not modified.
augmented_point_clouds = np.array(
[
[
[0, 1, 2, 3, 4, 5],
[10, 1, 2, 3, 4, 2],
[100, 100, 2, 3, 4, 1],
[-20, -20, 21, 1, 0, 1.3747642],
],
[
[0, 1, 2, 3, 4, 5],
[10, 1, 2, 3, 4, 2],
[100, 100, 2, 3, 4, 1],
[-20, -20, 21, 1, 0, 1.6563809],
],
]
).astype("float32")
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
# [-20, -20, 21, 1, 0, 2] is randomly selected as the frustum center.
        # [0, 1, 2, 3, 4, 5] and [10, 1, 2, 3, 4, 2] are not changed because
        # their distance is less than r_distance. [100, 100, 2, 3, 4, 1] is
        # not changed because it falls outside phi_width.
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
def test_augment_only_one_valid_point_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = FrustumRandomPointFeatureNoise(
r_distance=10,
theta_width=np.pi,
phi_width=1.5 * np.pi,
max_noise_level=0.5,
)
point_clouds = np.array(
[
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[100, 100, 2, 3, 4, 1],
[0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
# bounding boxes and point clouds (x, y, z, class) are not modified.
augmented_point_clouds = np.array(
[
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[100, 100, 2, 3, 4.119616, 0.619783],
[0, 0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[100, 100, 2, 3, 3.192014, 0.618371],
[0, 0, 0, 0, 0, 0],
],
]
).astype("float32")
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
# [100, 100, 2, 3, 4, 1] is selected as the frustum center because it is
# the only valid point.
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
def test_not_augment_max_noise_level0_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0, theta_width=1, phi_width=1, max_noise_level=0.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_not_augment_max_noise_level1_frustum_empty_point_clouds_and_bounding_boxes( # noqa: E501
self,
):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=10, theta_width=0, phi_width=0, max_noise_level=1.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_exclude_all_points(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0,
theta_width=1,
phi_width=1,
max_noise_level=1.0,
exclude_classes=1,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
exclude_classes = np.ones(shape=(2, 50, 1)).astype("float32")
point_clouds = np.concatenate([point_clouds, exclude_classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_exclude_the_first_half_points(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0,
theta_width=10,
phi_width=10,
max_noise_level=1.0,
exclude_classes=[1, 2],
)
point_clouds = np.random.random(size=(2, 10, 10)).astype("float32")
class_1 = np.ones(shape=(2, 2, 1)).astype("float32")
class_2 = np.ones(shape=(2, 3, 1)).astype("float32") * 2
classes = np.concatenate(
[class_1, class_2, np.zeros(shape=(2, 5, 1)).astype("float32")],
axis=1,
)
point_clouds = np.concatenate([point_clouds, classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(
inputs[POINT_CLOUDS][:, :5, :], outputs[POINT_CLOUDS][:, :5, :]
)
self.assertNotAllClose(
inputs[POINT_CLOUDS][:, 5:, :], outputs[POINT_CLOUDS][:, 5:, :]
)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0, theta_width=1, phi_width=1, max_noise_level=0.5
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_point_feature_noise_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_point_feature_noise_test.py",
"repo_id": "keras-cv",
"token_count": 4481
} | 15 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from tensorflow import keras
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.random_drop_box import RandomDropBox
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
ADDITIONAL_POINT_CLOUDS = base_augmentation_layer_3d.ADDITIONAL_POINT_CLOUDS
ADDITIONAL_BOUNDING_BOXES = base_augmentation_layer_3d.ADDITIONAL_BOUNDING_BOXES
class RandomDropBoxTest(TestCase):
def test_drop_class1_box_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = RandomDropBox(label_index=1, max_drop_bounding_boxes=4)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Drop the first object bounding box [0, 0, 0, 4, 4, 4, 0, 1] and
# points.
augmented_point_clouds = np.array(
[
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[10, 1, 2, 3, 4],
[0, 0, 0, 0, 0],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 0, 0, 0, 0, 0, 0],
[20, 20, 20, 1, 1, 1, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_drop_both_boxes_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = RandomDropBox(max_drop_bounding_boxes=4)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Drop both object bounding boxes and points.
augmented_point_clouds = np.array(
[
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[10, 1, 2, 3, 4],
[0, 0, 0, 0, 0],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_not_drop_any_box_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = RandomDropBox(max_drop_bounding_boxes=0)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Do not drop any bounding box or point.
augmented_point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_batch_drop_one_of_the_box_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(4)
add_layer = RandomDropBox(max_drop_bounding_boxes=2)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
]
* 3
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Batch 0: drop the first bounding box [0, 0, 0, 4, 4, 4, 0, 1] and
# points,
# Batch 1,2: drop the second bounding box [20, 20, 20, 3, 3, 3, 0, 2]
# and points,
augmented_point_clouds = np.array(
[
[
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[10, 1, 2, 3, 4],
[0, 0, 0, 0, 0],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2,
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2,
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2,
]
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[
[0, 0, 0, 0, 0, 0, 0, 0],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2,
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2,
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2,
]
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_drop_box_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_drop_box_test.py",
"repo_id": "keras-cv",
"token_count": 7814
} | 16 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.layers.TransformerEncoder")
class TransformerEncoder(layers.Layer):
"""
Transformer encoder block implementation as a Keras Layer.
Args:
project_dim: the dimensionality of the projection of the encoder, and
output of the `MultiHeadAttention`
mlp_dim: the intermediate dimensionality of the MLP head before
projecting to `project_dim`
num_heads: the number of heads for the `MultiHeadAttention` layer
mlp_dropout: default 0.1, the dropout rate to apply between the layers
of the MLP head of the encoder
attention_dropout: default 0.1, the dropout rate to apply in the
MultiHeadAttention layer
        activation: default `keras.activations.gelu`, the activation function
            to apply in the MLP head - should be a function
layer_norm_epsilon: default 1e-06, the epsilon for `LayerNormalization`
layers
Basic usage:
```
project_dim = 1024
mlp_dim = 3072
num_heads = 4
encoded_patches = keras_cv.layers.PatchingAndEmbedding(
project_dim=project_dim,
patch_size=16)(img_batch)
trans_encoded = keras_cv.layers.TransformerEncoder(project_dim=project_dim,
mlp_dim = mlp_dim,
num_heads=num_heads)(encoded_patches)
print(trans_encoded.shape) # (1, 197, 1024)
```
"""
def __init__(
self,
project_dim,
num_heads,
mlp_dim,
mlp_dropout=0.1,
attention_dropout=0.1,
activation=keras.activations.gelu,
layer_norm_epsilon=1e-06,
**kwargs,
):
super().__init__(**kwargs)
self.project_dim = project_dim
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.mlp_dropout = mlp_dropout
self.attention_dropout = attention_dropout
self.activation = activation
self.layer_norm_epsilon = layer_norm_epsilon
self.mlp_units = [mlp_dim, project_dim]
self.layer_norm1 = layers.LayerNormalization(
epsilon=self.layer_norm_epsilon
)
self.layer_norm2 = layers.LayerNormalization(
epsilon=self.layer_norm_epsilon
)
self.attn = layers.MultiHeadAttention(
num_heads=self.num_heads,
key_dim=self.project_dim // self.num_heads,
dropout=self.attention_dropout,
)
self.dense1 = layers.Dense(self.mlp_units[0])
self.dense2 = layers.Dense(self.mlp_units[1])
def call(self, inputs):
"""Calls the Transformer Encoder on an input sequence.
Args:
            inputs: A `tf.Tensor` of shape [batch, sequence_length, project_dim]
Returns:
            A `tf.Tensor` of the same shape, [batch, sequence_length, project_dim]
        """
"""
if inputs.shape[-1] != self.project_dim:
raise ValueError(
"The input and output dimensionality must be the same, but the "
f"TransformerEncoder was provided with {inputs.shape[-1]} and "
f"{self.project_dim}"
)
x = self.layer_norm1(inputs)
x = self.attn(x, x)
x = layers.Dropout(self.mlp_dropout)(x)
x = layers.Add()([x, inputs])
y = self.layer_norm2(x)
y = self.dense1(y)
if self.activation == keras.activations.gelu:
y = self.activation(y, approximate=True)
else:
y = self.activation(y)
y = layers.Dropout(self.mlp_dropout)(y)
y = self.dense2(y)
y = layers.Dropout(self.mlp_dropout)(y)
output = layers.Add()([x, y])
return output
def get_config(self):
config = super().get_config()
activation = self.activation
if not isinstance(activation, (str, dict)):
activation = keras.activations.serialize(activation)
config.update(
{
"project_dim": self.project_dim,
"mlp_dim": self.mlp_dim,
"num_heads": self.num_heads,
"attention_dropout": self.attention_dropout,
"mlp_dropout": self.mlp_dropout,
"activation": activation,
"layer_norm_epsilon": self.layer_norm_epsilon,
}
)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
activation = config.pop("activation")
if isinstance(activation, (str, dict)):
activation = keras.activations.deserialize(activation)
return cls(activation=activation, **config)
| keras-cv/keras_cv/layers/transformer_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/layers/transformer_encoder.py",
"repo_id": "keras-cv",
"token_count": 2358
} | 17 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.losses.iou_loss import IoULoss
from keras_cv.tests.test_case import TestCase
class IoUTest(TestCase):
def test_output_shape(self):
y_true = np.random.uniform(size=(2, 2, 4), low=0, high=10)
y_pred = np.random.uniform(size=(2, 2, 4), low=0, high=20)
iou_loss = IoULoss(bounding_box_format="xywh")
self.assertAllEqual(iou_loss(y_true, y_pred).shape, ())
def test_output_shape_reduction_none(self):
y_true = np.random.uniform(size=(2, 2, 4), low=0, high=10)
y_pred = np.random.uniform(size=(2, 2, 4), low=0, high=20)
iou_loss = IoULoss(bounding_box_format="xywh", reduction="none")
self.assertAllEqual(
iou_loss(y_true, y_pred).shape,
[
2,
],
)
def test_output_shape_relative(self):
y_true = [
[0.0, 0.0, 0.1, 0.1],
[0.0, 0.0, 0.2, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.2, 0.3, 0.3],
]
y_pred = [
[0.0, 0.0, 0.5, 0.6],
[0.0, 0.0, 0.7, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.1, 0.3, 0.3],
]
iou_loss = IoULoss(bounding_box_format="rel_xyxy")
self.assertAllEqual(iou_loss(y_true, y_pred).shape, ())
def test_output_value(self):
y_true = [
[0, 0, 1, 1],
[0, 0, 2, 3],
[4, 5, 3, 6],
[2, 2, 3, 3],
]
y_pred = [
[0, 0, 5, 6],
[0, 0, 7, 3],
[4, 5, 5, 6],
[2, 1, 3, 3],
]
iou_loss = IoULoss(bounding_box_format="xywh")
# -log(compute_iou(y_true, y_pred)) = 1.0363084
self.assertAllClose(iou_loss(y_true, y_pred), 1.0363084)
| keras-cv/keras_cv/losses/iou_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/iou_loss_test.py",
"repo_id": "keras-cv",
"token_count": 1225
} | 18 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.models import legacy
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetLBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetMBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetSBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetTinyBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetXLBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone import (
CSPDarkNetBackbone,
)
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet121Backbone,
)
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet169Backbone,
)
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet201Backbone,
)
from keras_cv.models.backbones.densenet.densenet_backbone import (
DenseNetBackbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB0Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB1Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB2Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB3Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB4Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_backbone import ( # noqa: E501
EfficientNetLiteBackbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B0Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B1Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B2Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B3Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B4Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B5Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B6Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B7Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B0Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B1Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B2Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B3Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2LBackbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2MBackbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2SBackbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB0Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB1Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB2Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB3Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB4Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB5Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTBackbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_aliases import (
MobileNetV3LargeBackbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_aliases import (
MobileNetV3SmallBackbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone import (
MobileNetV3Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet18Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet34Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet50Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet101Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet152Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
ResNetBackbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet18V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet34V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet50V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet101V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet152V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
ResNetV2Backbone,
)
from keras_cv.models.backbones.vgg16.vgg16_backbone import VGG16Backbone
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetBBackbone
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetHBackbone
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetLBackbone
from keras_cv.models.backbones.vit_det.vit_det_backbone import ViTDetBackbone
from keras_cv.models.classification.image_classifier import ImageClassifier
from keras_cv.models.feature_extractor.clip import CLIP
from keras_cv.models.object_detection.retinanet.retinanet import RetinaNet
from keras_cv.models.object_detection.yolo_v8.yolo_v8_backbone import (
YOLOV8Backbone,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_detector import (
YOLOV8Detector,
)
from keras_cv.models.segmentation import BASNet
from keras_cv.models.segmentation import DeepLabV3Plus
from keras_cv.models.segmentation import SAMMaskDecoder
from keras_cv.models.segmentation import SAMPromptEncoder
from keras_cv.models.segmentation import SegmentAnythingModel
from keras_cv.models.segmentation import TwoWayTransformer
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormer
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB0
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB1
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB2
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB3
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB4
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB5
from keras_cv.models.stable_diffusion import StableDiffusion
from keras_cv.models.stable_diffusion import StableDiffusionV2
| keras-cv/keras_cv/models/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/models/__init__.py",
"repo_id": "keras-cv",
"token_count": 3123
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet121Backbone,
)
from keras_cv.models.backbones.densenet.densenet_backbone import (
DenseNetBackbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class DenseNetBackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_valid_call(self):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_applications_model(self):
model = DenseNet121Backbone()
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
include_rescaling=False,
)
model_output = model(self.input_batch)
save_path = os.path.join(self.get_temp_dir(), "densenet_backbone.keras")
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, DenseNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = DenseNet121Backbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "densenet_alias_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
        # Note that these aliases are serialized as the base class
self.assertIsInstance(restored_model, DenseNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = DenseNet121Backbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P2", "P3", "P4", "P5"]
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 256),
)
self.assertEquals(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 512),
)
self.assertEquals(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 1024),
)
self.assertEquals(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 1024),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
input_shape=(None, None, num_channels),
include_rescaling=False,
)
self.assertEqual(model.output_shape, (None, None, None, 1024))
| keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 2063
} | 20 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNetV2 model preset configurations."""
backbone_presets_no_weights = {
"efficientnetv2_s": {
"metadata": {
"description": (
"EfficientNet architecture with 6 convolutional blocks."
),
"params": 20331360,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_s/2", # noqa: E501
},
"efficientnetv2_m": {
"metadata": {
"description": (
"EfficientNet architecture with 7 convolutional blocks."
),
"params": 53150388,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_m/2", # noqa: E501
},
"efficientnetv2_l": {
"metadata": {
"description": (
"EfficientNet architecture with 7 convolutional "
"blocks, but more filters the in `efficientnetv2_m`."
),
"params": 117746848,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_l/2", # noqa: E501
},
"efficientnetv2_b0": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.0`."
),
"params": 5919312,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b0/2", # noqa: E501
},
"efficientnetv2_b1": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.1`."
),
"params": 6931124,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b1/2", # noqa: E501
},
"efficientnetv2_b2": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.1` and `depth_coefficient=1.2`."
),
"params": 8769374,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b2/2", # noqa: E501
},
"efficientnetv2_b3": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.2` and `depth_coefficient=1.4`."
),
"params": 12930622,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b3/2", # noqa: E501
},
}
backbone_presets_with_weights = {
"efficientnetv2_s_imagenet": {
"metadata": {
"description": (
"EfficientNet architecture with 6 convolutional "
"blocks. Weights are initialized to pretrained imagenet "
"classification weights.Published weights are capable of "
"scoring 83.9%top 1 accuracy "
"and 96.7% top 5 accuracy on imagenet."
),
"params": 20331360,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_s_imagenet/2", # noqa: E501
},
"efficientnetv2_b0_imagenet": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.0`. "
"Weights are "
"initialized to pretrained imagenet classification weights. "
"Published weights are capable of scoring 77.1% top 1 accuracy "
"and 93.3% top 5 accuracy on imagenet."
),
"params": 5919312,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b0_imagenet/2", # noqa: E501
},
"efficientnetv2_b1_imagenet": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.1`. "
"Weights are "
"initialized to pretrained imagenet classification weights."
"Published weights are capable of scoring 79.1% top 1 accuracy "
"and 94.4% top 5 accuracy on imagenet."
),
"params": 6931124,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b1_imagenet/2", # noqa: E501
},
"efficientnetv2_b2_imagenet": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.1` and `depth_coefficient=1.2`. "
"Weights are initialized to pretrained "
"imagenet classification weights."
"Published weights are capable of scoring 80.1% top 1 accuracy "
"and 94.9% top 5 accuracy on imagenet."
),
"params": 8769374,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b2_imagenet/2", # noqa: E501
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
| keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 3507
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
ResNetBackbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone_presets import (
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """ResNetBackbone (V1) model with {num_layers} layers.
Reference:
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
The difference in ResNetV1 and ResNetV2 rests in the structure of their
individual building blocks. In ResNetV2, the batch normalization and
ReLU activation precede the convolution layers, as opposed to ResNetV1 where
the batch normalization and ReLU activation are applied after the
convolution layers.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = ResNet{num_layers}Backbone()
output = model(input_data)
```
""" # noqa: E501
@keras_cv_export("keras_cv.models.ResNet18Backbone")
class ResNet18Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet18", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet34Backbone")
class ResNet34Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet34", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet50Backbone")
class ResNet50Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet50", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"resnet50_imagenet": copy.deepcopy(
backbone_presets["resnet50_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.ResNet101Backbone")
class ResNet101Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet101", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet152Backbone")
class ResNet152Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet152", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(ResNet18Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=18))
setattr(ResNet34Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=34))
setattr(ResNet50Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=50))
setattr(ResNet101Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=101))
setattr(ResNet152Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=152))
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_aliases.py",
"repo_id": "keras-cv",
"token_count": 2951
} | 22 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CLIP presets."""
clip_presets = {
"clip-vit-base-patch16": {
"metadata": {
"description": (
"The model uses a ViT-B/16 Transformer architecture as an "
"image encoder and uses a masked self-attention Transformer as "
"a text encoder. These encoders are trained to maximize the "
"similarity of (image, text) pairs via a contrastive loss. The "
"model uses a patch size of 16 and input images of size (224, "
"224)"
),
"params": 149620737,
"official_name": "CLIP",
"path": "clip",
},
"kaggle_handle": "kaggle://keras/clip/keras/clip-vit-base-patch16/2",
},
"clip-vit-base-patch32": {
"metadata": {
"description": (
"The model uses a ViT-B/32 Transformer architecture as an "
"image encoder and uses a masked self-attention Transformer as "
"a text encoder. These encoders are trained to maximize the "
"similarity of (image, text) pairs via a contrastive loss.The "
"model uses a patch size of 32 and input images of size (224, "
"224)"
),
"params": 151277313,
"official_name": "CLIP",
"path": "clip",
},
"kaggle_handle": "kaggle://keras/clip/keras/clip-vit-base-patch32/2",
},
"clip-vit-large-patch14": {
"metadata": {
"description": (
"The model uses a ViT-L/14 Transformer architecture as an "
"image encoder and uses a masked self-attention Transformer as "
"a text encoder. These encoders are trained to maximize the "
"similarity of (image, text) pairs via a contrastive loss.The "
"model uses a patch size of 14 and input images of size (224, "
"224)"
),
"params": 427616513,
"official_name": "CLIP",
"path": "clip",
},
"kaggle_handle": "kaggle://keras/clip/keras/clip-vit-large-patch14/2",
},
"clip-vit-large-patch14-336": {
"metadata": {
"description": (
"The model uses a ViT-L/14 Transformer architecture as an "
"image encoder and uses a masked self-attention Transformer as "
"a text encoder. These encoders are trained to maximize the "
"similarity of (image, text) pairs via a contrastive loss.The "
"model uses a patch size of 14 and input images of size (336, "
"336)"
),
"params": 427944193,
"official_name": "CLIP",
"path": "clip",
},
"kaggle_handle": "kaggle://keras/clip/keras/clip-vit-large-patch14-336/2", # noqa: E501
},
}
| keras-cv/keras_cv/models/feature_extractor/clip/clip_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_presets.py",
"repo_id": "keras-cv",
"token_count": 1592
} | 23 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import math
import numpy as np
import tensorflow as tf
try:
import pandas as pd
except ImportError:
pd = None
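# `unpack_input` accepts either the raw input or a dict with "images" and
# "bounding_boxes" keys (the format produced by KerasCV detection pipelines)
# and returns an (images, bounding_boxes) tuple in the latter case.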
def unpack_input(data):
if type(data) is dict:
return data["images"], data["bounding_boxes"]
else:
return data
def _get_tensor_types():
if pd is None:
return (tf.Tensor, np.ndarray)
else:
return (tf.Tensor, np.ndarray, pd.Series, pd.DataFrame)
def convert_inputs_to_tf_dataset(
x=None, y=None, sample_weight=None, batch_size=None
):
if sample_weight is not None:
raise ValueError("RetinaNet does not yet support `sample_weight`.")
if isinstance(x, tf.data.Dataset):
if y is not None or batch_size is not None:
raise ValueError(
"When `x` is a `tf.data.Dataset`, please do not provide a "
f"value for `y` or `batch_size`. Got `y={y}`, "
f"`batch_size={batch_size}`."
)
return x
# batch_size defaults to 32, as it does in fit().
batch_size = batch_size or 32
# Parse inputs
inputs = x
if y is not None:
inputs = (x, y)
# Construct tf.data.Dataset
dataset = tf.data.Dataset.from_tensor_slices(inputs)
if batch_size == "full":
dataset = dataset.batch(x.shape[0])
elif batch_size is not None:
dataset = dataset.batch(batch_size)
return dataset
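# Illustrative usage (hypothetical names/shapes): eager tensors are wrapped
# into a batched `tf.data.Dataset`, while an existing dataset passes through:
#   images = np.ones((8, 512, 512, 3))
#   dataset = convert_inputs_to_tf_dataset(x=images, y=labels, batch_size=4)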
# TODO(lukewood): remove once exported from Keras core.
def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
include in the validation split. The rest of the dataset will be
included in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
def _can_split(t):
tensor_types = _get_tensor_types()
return isinstance(t, tensor_types) or t is None
flat_arrays = tf.nest.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplitable:
raise ValueError(
"`validation_split` is only supported for Tensors or NumPy "
"arrays, found following types in the input: {}".format(unsplitable)
)
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1.0 - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
"Training data contains {batch_dim} samples, which is not "
"sufficient to split it into a validation and training set as "
"specified by `validation_split={validation_split}`. Either "
"provide more data, or a different value for the "
"`validation_split` argument.".format(
batch_dim=batch_dim, validation_split=validation_split
)
)
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = tf.nest.map_structure(
functools.partial(_split, start=0, end=split_at), arrays
)
val_arrays = tf.nest.map_structure(
functools.partial(_split, start=split_at, end=batch_dim), arrays
)
return train_arrays, val_arrays
| keras-cv/keras_cv/models/object_detection/__internal__.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/__internal__.py",
"repo_id": "keras-cv",
"token_count": 1721
} | 24 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
import keras_cv
from keras_cv import bounding_box
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.test_backbone_presets import (
test_backbone_presets,
)
from keras_cv.models.object_detection.__test_utils__ import (
_create_bounding_box_dataset,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_detector_presets import (
yolo_v8_detector_presets,
)
from keras_cv.tests.test_case import TestCase
class YOLOV8DetectorTest(TestCase):
@pytest.mark.large # Fit is slow, so mark these large.
def test_fit(self):
bounding_box_format = "xywh"
yolo = keras_cv.models.YOLOV8Detector(
num_classes=2,
fpn_depth=1,
bounding_box_format=bounding_box_format,
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone"
),
)
yolo.compile(
optimizer="adam",
classification_loss="binary_crossentropy",
box_loss="ciou",
)
xs, ys = _create_bounding_box_dataset(bounding_box_format)
yolo.fit(x=xs, y=ys, epochs=1)
@pytest.mark.tf_keras_only
@pytest.mark.large # Fit is slow, so mark these large.
def test_fit_with_ragged_tensors(self):
bounding_box_format = "xywh"
yolo = keras_cv.models.YOLOV8Detector(
num_classes=2,
fpn_depth=1,
bounding_box_format=bounding_box_format,
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone"
),
)
yolo.compile(
optimizer="adam",
classification_loss="binary_crossentropy",
box_loss="ciou",
)
xs, ys = _create_bounding_box_dataset(bounding_box_format)
ys = bounding_box.to_ragged(ys)
yolo.fit(x=xs, y=ys, epochs=1)
@pytest.mark.large # Fit is slow, so mark these large.
def test_fit_with_no_valid_gt_bbox(self):
bounding_box_format = "xywh"
yolo = keras_cv.models.YOLOV8Detector(
num_classes=1,
fpn_depth=1,
bounding_box_format=bounding_box_format,
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone"
),
)
yolo.compile(
optimizer="adam",
classification_loss="binary_crossentropy",
box_loss="ciou",
)
xs, ys = _create_bounding_box_dataset(bounding_box_format)
        # Make all bounding_boxes invalid so they are filtered out
ys["classes"] = -np.ones_like(ys["classes"])
yolo.fit(x=xs, y=ys, epochs=1)
def test_trainable_weight_count(self):
yolo = keras_cv.models.YOLOV8Detector(
num_classes=2,
fpn_depth=1,
bounding_box_format="xywh",
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_s_backbone"
),
)
self.assertEqual(len(yolo.trainable_weights), 195)
def test_bad_loss(self):
yolo = keras_cv.models.YOLOV8Detector(
num_classes=2,
fpn_depth=1,
bounding_box_format="xywh",
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone"
),
)
with self.assertRaisesRegex(
ValueError,
"Invalid box loss",
):
yolo.compile(
box_loss="bad_loss", classification_loss="binary_crossentropy"
)
with self.assertRaisesRegex(
ValueError,
"Invalid classification loss",
):
yolo.compile(box_loss="ciou", classification_loss="bad_loss")
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = keras_cv.models.YOLOV8Detector(
num_classes=20,
bounding_box_format="xywh",
fpn_depth=1,
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone"
),
)
xs, _ = _create_bounding_box_dataset("xywh")
model_output = model(xs)
save_path = os.path.join(
self.get_temp_dir(), "yolo_v8_xs_detector.keras"
)
model.save(save_path)
# TODO: Remove the need to pass the `custom_objects` parameter.
restored_model = keras.saving.load_model(
save_path,
custom_objects={"YOLOV8Detector": keras_cv.models.YOLOV8Detector},
)
# Check we got the real object back.
self.assertIsInstance(restored_model, keras_cv.models.YOLOV8Detector)
# Check that output matches.
restored_output = restored_model(xs)
self.assertAllClose(
ops.convert_to_numpy(model_output["boxes"]),
ops.convert_to_numpy(restored_output["boxes"]),
)
self.assertAllClose(
ops.convert_to_numpy(model_output["classes"]),
ops.convert_to_numpy(restored_output["classes"]),
)
# TODO(tirthasheshpatel): Support updating prediction decoder in Keras Core.
@pytest.mark.tf_keras_only
def test_update_prediction_decoder(self):
yolo = keras_cv.models.YOLOV8Detector(
num_classes=2,
fpn_depth=1,
bounding_box_format="xywh",
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_s_backbone"
),
prediction_decoder=keras_cv.layers.NonMaxSuppression(
bounding_box_format="xywh",
from_logits=False,
confidence_threshold=0.0,
iou_threshold=1.0,
),
)
image = np.ones((1, 512, 512, 3))
outputs = yolo.predict(image)
# We predicted at least 1 box with confidence_threshold 0
self.assertGreater(outputs["boxes"].shape[0], 0)
yolo.prediction_decoder = keras_cv.layers.NonMaxSuppression(
bounding_box_format="xywh",
from_logits=False,
confidence_threshold=1.0,
iou_threshold=1.0,
)
outputs = yolo.predict(image)
# We predicted no boxes with confidence threshold 1
self.assertAllEqual(outputs["boxes"], -np.ones_like(outputs["boxes"]))
self.assertAllEqual(
outputs["confidence"], -np.ones_like(outputs["confidence"])
)
self.assertAllEqual(
outputs["classes"], -np.ones_like(outputs["classes"])
)
@pytest.mark.large
class YOLOV8DetectorSmokeTest(TestCase):
@parameterized.named_parameters(
*[(preset, preset) for preset in test_backbone_presets]
)
@pytest.mark.extra_large
def test_backbone_preset(self, preset):
model = keras_cv.models.YOLOV8Detector.from_preset(
preset,
num_classes=20,
bounding_box_format="xywh",
)
xs, _ = _create_bounding_box_dataset(bounding_box_format="xywh")
output = model(xs)
# 64 represents number of parameters in a box
# 5376 is the number of anchors for a 512x512 image
self.assertEqual(output["boxes"].shape, (xs.shape[0], 5376, 64))
def test_preset_with_forward_pass(self):
model = keras_cv.models.YOLOV8Detector.from_preset(
"yolo_v8_m_pascalvoc",
bounding_box_format="xywh",
)
image = np.ones((1, 512, 512, 3))
encoded_predictions = model(image)
self.assertAllClose(
ops.convert_to_numpy(encoded_predictions["boxes"][0, 0:5, 0]),
[-0.8303556, 0.75213313, 1.809204, 1.6576759, 1.4134747],
)
self.assertAllClose(
ops.convert_to_numpy(encoded_predictions["classes"][0, 0:5, 0]),
[
7.6146556e-08,
8.0103280e-07,
9.7873999e-07,
2.2314548e-06,
2.5051115e-06,
],
)
@pytest.mark.extra_large
class YOLOV8DetectorPresetFullTest(TestCase):
"""
Test the full enumeration of our presets.
    This tests every preset for YOLOV8Detector and is only run manually.
Run with:
`pytest keras_cv/models/object_detection/yolo_v8/yolo_v8_detector_test.py --run_extra_large`
""" # noqa: E501
def test_load_yolo_v8_detector(self):
input_data = np.ones(shape=(2, 224, 224, 3))
for preset in yolo_v8_detector_presets:
model = keras_cv.models.YOLOV8Detector.from_preset(
preset, bounding_box_format="xywh"
)
model(input_data)
| keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector_test.py",
"repo_id": "keras-cv",
"token_count": 4662
} | 25 |