# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import numpy as np from keras_cv import bounding_box from keras_cv import layers as cv_layers from keras_cv import losses from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.bounding_box.converters import _decode_deltas_to_boxes from keras_cv.models.backbones.backbone_presets import backbone_presets from keras_cv.models.backbones.backbone_presets import ( backbone_presets_with_weights, ) from keras_cv.models.object_detection.__internal__ import unpack_input from keras_cv.models.object_detection.retinanet import FeaturePyramid from keras_cv.models.object_detection.retinanet import PredictionHead from keras_cv.models.object_detection.retinanet import RetinaNetLabelEncoder from keras_cv.models.object_detection.retinanet.retinanet_presets import ( retinanet_presets, ) from keras_cv.models.task import Task from keras_cv.utils.python_utils import classproperty from keras_cv.utils.train import get_feature_extractor BOX_VARIANCE = [0.1, 0.1, 0.2, 0.2] @keras_cv_export( ["keras_cv.models.RetinaNet", "keras_cv.models.object_detection.RetinaNet"] ) class RetinaNet(Task): """A Keras model implementing the RetinaNet meta-architecture. Implements the RetinaNet architecture for object detection. The constructor requires `num_classes`, `bounding_box_format`, and a backbone. Optionally, a custom label encoder, and prediction decoder may be provided. Examples: ```python images = np.ones((1, 512, 512, 3)) labels = { "boxes": tf.cast([ [ [0, 0, 100, 100], [100, 100, 200, 200], [300, 300, 100, 100], ] ], dtype=tf.float32), "classes": tf.cast([[1, 1, 1]], dtype=tf.float32), } model = keras_cv.models.RetinaNet( num_classes=20, bounding_box_format="xywh", backbone=keras_cv.models.ResNet50Backbone.from_preset( "resnet50_imagenet" ) ) # Evaluate model without box decoding and NMS model(images) # Prediction with box decoding and NMS model.predict(images) # Train model model.compile( classification_loss='focal', box_loss='smoothl1', optimizer=keras.optimizers.SGD(global_clipnorm=10.0), jit_compile=False, ) model.fit(images, labels) ``` Args: num_classes: the number of classes in your dataset excluding the background class. Classes should be represented by integers in the range [0, num_classes). bounding_box_format: The format of bounding boxes of input dataset. Refer [to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/) for more details on supported bounding box formats. backbone: `keras.Model`. If the default `feature_pyramid` is used, must implement the `pyramid_level_inputs` property with keys "P3", "P4", and "P5" and layer names as values. A somewhat sensible backbone to use in many cases is the: `keras_cv.models.ResNetBackbone.from_preset("resnet50_imagenet")` anchor_generator: (Optional) a `keras_cv.layers.AnchorGenerator`. If provided, the anchor generator will be passed to both the `label_encoder` and the `prediction_decoder`. 
Only to be used when both `label_encoder` and `prediction_decoder` are both `None`. Defaults to an anchor generator with the parameterization: `strides=[2**i for i in range(3, 8)]`, `scales=[2**x for x in [0, 1 / 3, 2 / 3]]`, `sizes=[32.0, 64.0, 128.0, 256.0, 512.0]`, and `aspect_ratios=[0.5, 1.0, 2.0]`. label_encoder: (Optional) a keras.Layer that accepts an image Tensor, a bounding box Tensor and a bounding box class Tensor to its `call()` method, and returns RetinaNet training targets. By default, a KerasCV standard `RetinaNetLabelEncoder` is created and used. Results of this object's `call()` method are passed to the `loss` object for `box_loss` and `classification_loss` the `y_true` argument. prediction_decoder: (Optional) A `keras.layers.Layer` that is responsible for transforming RetinaNet predictions into usable bounding box Tensors. If not provided, a default is provided. The default `prediction_decoder` layer is a `keras_cv.layers.MultiClassNonMaxSuppression` layer, which uses a Non-Max Suppression for box pruning. feature_pyramid: (Optional) A `keras.layers.Layer` that produces a list of 4D feature maps (batch dimension included) when called on the pyramid-level outputs of the `backbone`. If not provided, the reference implementation from the paper will be used. classification_head: (Optional) A `keras.Layer` that performs classification of the bounding boxes. If not provided, a simple ConvNet with 3 layers will be used. box_head: (Optional) A `keras.Layer` that performs regression of the bounding boxes. If not provided, a simple ConvNet with 3 layers will be used. """ # noqa: E501 def __init__( self, backbone, num_classes, bounding_box_format, anchor_generator=None, label_encoder=None, prediction_decoder=None, feature_pyramid=None, classification_head=None, box_head=None, **kwargs, ): if anchor_generator is not None and label_encoder is not None: raise ValueError( "`anchor_generator` is only to be provided when " "`label_encoder` is `None`. Received `anchor_generator=" f"{anchor_generator}`, label_encoder={label_encoder}`. 
To " "customize the behavior of the anchor_generator inside of a " "custom `label_encoder` you should provide both to `RetinaNet`" "provide both to `RetinaNet`, and ensure that the " "`anchor_generator` provided to both is identical" ) if label_encoder is None: anchor_generator = ( anchor_generator or RetinaNet.default_anchor_generator(bounding_box_format) ) label_encoder = RetinaNetLabelEncoder( bounding_box_format=bounding_box_format, anchor_generator=anchor_generator, box_variance=BOX_VARIANCE, ) extractor_levels = ["P3", "P4", "P5"] extractor_layer_names = [ backbone.pyramid_level_inputs[i] for i in extractor_levels ] feature_extractor = get_feature_extractor( backbone, extractor_layer_names, extractor_levels ) feature_pyramid = feature_pyramid or FeaturePyramid() prior_probability = keras.initializers.Constant( -np.log((1 - 0.01) / 0.01) ) classification_head = classification_head or PredictionHead( output_filters=9 * num_classes, bias_initializer=prior_probability, ) box_head = box_head or PredictionHead( output_filters=9 * 4, bias_initializer=keras.initializers.Zeros() ) # Begin construction of forward pass images = keras.layers.Input( feature_extractor.input_shape[1:], name="images" ) backbone_outputs = feature_extractor(images) features = feature_pyramid(backbone_outputs) cls_pred = [] box_pred = [] for feature in features: box_pred.append(keras.layers.Reshape((-1, 4))(box_head(feature))) cls_pred.append( keras.layers.Reshape((-1, num_classes))( classification_head(feature) ) ) cls_pred = keras.layers.Concatenate(axis=1, name="classification")( cls_pred ) box_pred = keras.layers.Concatenate(axis=1, name="box")(box_pred) # box_pred is always in "center_yxhw" delta-encoded no matter what # format you pass in. inputs = {"images": images} outputs = {"box": box_pred, "classification": cls_pred} super().__init__( inputs=inputs, outputs=outputs, **kwargs, ) self.label_encoder = label_encoder self.anchor_generator = label_encoder.anchor_generator self.bounding_box_format = bounding_box_format self.num_classes = num_classes self.backbone = backbone self.feature_extractor = feature_extractor self._prediction_decoder = ( prediction_decoder or cv_layers.NonMaxSuppression( bounding_box_format=bounding_box_format, from_logits=True, ) ) self.feature_pyramid = feature_pyramid self.classification_head = classification_head self.box_head = box_head self.build(backbone.input_shape) def predict_step(self, *args): outputs = super().predict_step(*args) if type(outputs) is tuple: return self.decode_predictions(outputs[0], args[-1]), outputs[1] else: return self.decode_predictions(outputs, args[-1]) @property def prediction_decoder(self): return self._prediction_decoder @prediction_decoder.setter def prediction_decoder(self, prediction_decoder): if prediction_decoder.bounding_box_format != self.bounding_box_format: raise ValueError( "Expected `prediction_decoder` and RetinaNet to " "use the same `bounding_box_format`, but got " "`prediction_decoder.bounding_box_format=" f"{prediction_decoder.bounding_box_format}`, and " "`self.bounding_box_format=" f"{self.bounding_box_format}`." 
) self._prediction_decoder = prediction_decoder self.make_predict_function(force=True) self.make_train_function(force=True) self.make_test_function(force=True) @staticmethod def default_anchor_generator(bounding_box_format): strides = [2**i for i in range(3, 8)] scales = [2**x for x in [0, 1 / 3, 2 / 3]] sizes = [32.0, 64.0, 128.0, 256.0, 512.0] aspect_ratios = [0.5, 1.0, 2.0] return cv_layers.AnchorGenerator( bounding_box_format=bounding_box_format, sizes=sizes, aspect_ratios=aspect_ratios, scales=scales, strides=strides, clip_boxes=True, ) def decode_predictions(self, predictions, images): box_pred, cls_pred = predictions["box"], predictions["classification"] # box_pred is on "center_yxhw" format, convert to target format. image_shape = tuple(images[0].shape) anchors = self.anchor_generator(image_shape=image_shape) anchors = ops.concatenate([a for a in anchors.values()], axis=0) box_pred = _decode_deltas_to_boxes( anchors=anchors, boxes_delta=box_pred, anchor_format=self.anchor_generator.bounding_box_format, box_format=self.bounding_box_format, variance=BOX_VARIANCE, image_shape=image_shape, ) # box_pred is now in "self.bounding_box_format" format box_pred = bounding_box.convert_format( box_pred, source=self.bounding_box_format, target=self.prediction_decoder.bounding_box_format, image_shape=image_shape, ) y_pred = self.prediction_decoder( box_pred, cls_pred, image_shape=image_shape ) y_pred["boxes"] = bounding_box.convert_format( y_pred["boxes"], source=self.prediction_decoder.bounding_box_format, target=self.bounding_box_format, image_shape=image_shape, ) return y_pred def compile( self, box_loss=None, classification_loss=None, loss=None, metrics=None, **kwargs, ): """compiles the RetinaNet. compile() mirrors the standard Keras compile() method, but has a few key distinctions. Primarily, all metrics must support bounding boxes, and two losses must be provided: `box_loss` and `classification_loss`. Args: box_loss: a Keras loss to use for box offset regression. Preconfigured losses are provided when the string "huber" or "smoothl1" are passed. classification_loss: a Keras loss to use for box classification. A preconfigured `FocalLoss` is provided when the string "focal" is passed. weight_decay: a float for variable weight decay. metrics: KerasCV object detection metrics that accept decoded bounding boxes as their inputs. Examples of this metric type are `keras_cv.metrics.BoxRecall()` and `keras_cv.metrics.BoxMeanAveragePrecision()`. When `metrics` are included in the call to `compile()`, the RetinaNet will perform non-max suppression decoding during the forward pass. By default, the RetinaNet uses a `keras_cv.layers.MultiClassNonMaxSuppression()` layer to perform decoding. This behavior can be customized by passing in a `prediction_decoder` to the constructor or by modifying the `prediction_decoder` attribute on the model. It should be noted that the default non-max suppression operation does not have TPU support, and thus when training on TPU metrics must be evaluated in a `keras.utils.SidecarEvaluator` or a `keras.callbacks.Callback`. kwargs: most other `keras.Model.compile()` arguments are supported and propagated to the `keras.Model` class. """ if loss is not None: raise ValueError( "`RetinaNet` does not accept a `loss` to `compile()`. " "Instead, please pass `box_loss` and `classification_loss`. " "`loss` will be ignored during training." 
) box_loss = _parse_box_loss(box_loss) classification_loss = _parse_classification_loss(classification_loss) if hasattr(classification_loss, "from_logits"): if not classification_loss.from_logits: raise ValueError( "RetinaNet.compile() expects `from_logits` to be True for " "`classification_loss`. Got " "`classification_loss.from_logits=" f"{classification_loss.from_logits}`" ) if hasattr(box_loss, "bounding_box_format"): if box_loss.bounding_box_format != self.bounding_box_format: raise ValueError( "Wrong `bounding_box_format` passed to `box_loss` in " "`RetinaNet.compile()`. Got " "`box_loss.bounding_box_format=" f"{box_loss.bounding_box_format}`, want " "`box_loss.bounding_box_format=" f"{self.bounding_box_format}`" ) self.box_loss = box_loss self.classification_loss = classification_loss losses = { "box": self.box_loss, "classification": self.classification_loss, } self._has_user_metrics = metrics is not None and len(metrics) != 0 self._user_metrics = metrics super().compile(loss=losses, **kwargs) def compute_loss(self, x, y, y_pred, sample_weight, **kwargs): y_for_label_encoder = bounding_box.convert_format( y, source=self.bounding_box_format, target=self.label_encoder.bounding_box_format, images=x, ) boxes, classes = self.label_encoder(x, y_for_label_encoder) box_pred = y_pred["box"] cls_pred = y_pred["classification"] if boxes.shape[-1] != 4: raise ValueError( "boxes should have shape (None, None, 4). Got " f"boxes.shape={tuple(boxes.shape)}" ) if box_pred.shape[-1] != 4: raise ValueError( "box_pred should have shape (None, None, 4). Got " f"box_pred.shape={tuple(box_pred.shape)}. Does your model's " "`num_classes` parameter match your losses `num_classes` " "parameter?" ) if cls_pred.shape[-1] != self.num_classes: raise ValueError( "cls_pred should have shape (None, None, 4). Got " f"cls_pred.shape={tuple(cls_pred.shape)}. Does your model's " "`num_classes` parameter match your losses `num_classes` " "parameter?" 
) cls_labels = ops.one_hot( ops.cast(classes, "int32"), self.num_classes, dtype="float32" ) positive_mask = ops.cast(ops.greater(classes, -1.0), dtype="float32") normalizer = ops.sum(positive_mask) cls_weights = ops.cast(ops.not_equal(classes, -2.0), dtype="float32") cls_weights /= normalizer box_weights = positive_mask / normalizer y_true = { "box": boxes, "classification": cls_labels, } sample_weights = { "box": box_weights, "classification": cls_weights, } zero_weight = { "box": ops.zeros_like(box_weights), "classification": ops.zeros_like(cls_weights), } sample_weights = ops.cond( normalizer == 0, lambda: zero_weight, lambda: sample_weights, ) return super().compute_loss( x=x, y=y_true, y_pred=y_pred, sample_weight=sample_weights ) def train_step(self, *args): data = args[-1] args = args[:-1] x, y = unpack_input(data) return super().train_step(*args, (x, y)) def test_step(self, *args): data = args[-1] args = args[:-1] x, y = unpack_input(data) return super().test_step(*args, (x, y)) def compute_metrics(self, x, y, y_pred, sample_weight): metrics = {} metrics.update(super().compute_metrics(x, {}, {}, sample_weight={})) if not self._has_user_metrics: return metrics y_pred = self.decode_predictions(y_pred, x) for metric in self._user_metrics: metric.update_state(y, y_pred, sample_weight=sample_weight) for metric in self._user_metrics: result = metric.result() if isinstance(result, dict): metrics.update(result) else: metrics[metric.name] = result return metrics def get_config(self): return { "num_classes": self.num_classes, "bounding_box_format": self.bounding_box_format, "backbone": keras.saving.serialize_keras_object(self.backbone), "label_encoder": keras.saving.serialize_keras_object( self.label_encoder ), "prediction_decoder": self._prediction_decoder, "classification_head": keras.saving.serialize_keras_object( self.classification_head ), "box_head": keras.saving.serialize_keras_object(self.box_head), } @classmethod def from_config(cls, config): if "box_head" in config and isinstance(config["box_head"], dict): config["box_head"] = keras.layers.deserialize(config["box_head"]) if "classification_head" in config and isinstance( config["classification_head"], dict ): config["classification_head"] = keras.layers.deserialize( config["classification_head"] ) if "label_encoder" in config and isinstance( config["label_encoder"], dict ): config["label_encoder"] = keras.layers.deserialize( config["label_encoder"] ) if "prediction_decoder" in config and isinstance( config["prediction_decoder"], dict ): config["prediction_decoder"] = keras.layers.deserialize( config["prediction_decoder"] ) return super().from_config(config) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return copy.deepcopy({**backbone_presets, **retinanet_presets}) @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return copy.deepcopy( {**backbone_presets_with_weights, **retinanet_presets} ) @classproperty def backbone_presets(cls): """Dictionary of preset names and configurations of compatible backbones.""" return copy.deepcopy(backbone_presets) def _parse_box_loss(loss): if not isinstance(loss, str): # support arbitrary callables return loss # case insensitive comparison if loss.lower() == "smoothl1": return losses.SmoothL1Loss(l1_cutoff=1.0, reduction="sum") if loss.lower() == "huber": return keras.losses.Huber(reduction="sum") raise ValueError( "Expected `box_loss` to be either a Keras Loss, " f"callable, or the string 
'SmoothL1'. Got loss={loss}." ) def _parse_classification_loss(loss): if not isinstance(loss, str): # support arbitrary callables return loss # case insensitive comparison if loss.lower() == "focal": return losses.FocalLoss(from_logits=True, reduction="sum") raise ValueError( "Expected `classification_loss` to be either a Keras Loss, " f"callable, or the string 'Focal'. Got loss={loss}." )
keras-cv/keras_cv/models/object_detection/retinanet/retinanet.py
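The docstring above already shows basic training; what it only alludes to is swapping the `prediction_decoder` after construction. Below is a minimal sketch of that workflow, assuming the `resnet50_imagenet` preset is available and that `keras_cv.layers.NonMaxSuppression` accepts `iou_threshold` and `confidence_threshold` arguments (those argument names are assumptions, not taken from this file).

```python
import numpy as np
import keras_cv

model = keras_cv.models.RetinaNet(
    num_classes=20,
    bounding_box_format="xywh",
    backbone=keras_cv.models.ResNet50Backbone.from_preset("resnet50_imagenet"),
)

# The `prediction_decoder` setter checks the bounding box format and rebuilds
# the predict/train/test functions, so the new decoder takes effect immediately.
model.prediction_decoder = keras_cv.layers.NonMaxSuppression(
    bounding_box_format="xywh",
    from_logits=True,           # the classification head emits raw logits
    iou_threshold=0.5,          # assumed argument name
    confidence_threshold=0.5,   # assumed argument name
)

images = np.ones((1, 512, 512, 3), dtype="float32")
# `predict()` decodes the box deltas against the anchors and applies NMS,
# returning decoded boxes in the model's `bounding_box_format`.
boxes = model.predict(images)
```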
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from tensorflow import keras import keras_cv.layers as cv_layers from keras_cv import bounding_box class YoloXPredictionDecoder(keras.layers.Layer): """Decodes the predictions from YoloX head. This layer is similar to the decoding code in `YoloX.compute_losses`. This is followed by a bounding box suppression layer. Arguments: bounding_box_format: The format of bounding boxes of input dataset. Refer to https://keras.io/api/keras_cv/bounding_box/formats/ for more details on supported bounding box formats. num_classes: The number of classes to be considered for the classification head. suppression_layer: A `keras.layers.Layer` that follows the same API signature of the `keras_cv.layers.MultiClassNonMaxSuppression` layer. This layer should perform a suppression operation such as Non Max Suppression, or Soft Non-Max Suppression. """ def __init__( self, bounding_box_format, num_classes, suppression_layer=None, **kwargs ): super().__init__(**kwargs) self.bounding_box_format = bounding_box_format self.num_classes = num_classes self.suppression_layer = ( suppression_layer or cv_layers.MultiClassNonMaxSuppression( bounding_box_format=bounding_box_format, from_logits=False, confidence_threshold=0.01, iou_threshold=0.65, max_detections=100, max_detections_per_class=100, ) ) if ( self.suppression_layer.bounding_box_format != self.bounding_box_format ): raise ValueError( "`suppression_layer` must have the same `bounding_box_format` " "as the `YoloXPredictionDecoder()` layer. " "Received `YoloXPredictionDecoder.bounding_box_format=" f"{self.bounding_box_format}`, " f"`suppression_layer={suppression_layer}`." 
) self.built = True def call(self, images, predictions): image_shape = tf.cast(tf.shape(images), dtype=self.compute_dtype)[1:-1] batch_size = tf.shape(predictions[0])[0] grids = [] strides = [] shapes = [x.shape[1:3] for x in predictions] # 5 + self.num_classes is a concatenation of bounding boxes (length=4) # + objectness score (length=1) + num_classes # this reshape is simply collapsing axes 1 and 2 of x into a single # dimension predictions = [ tf.reshape(x, [batch_size, -1, 5 + self.num_classes]) for x in predictions ] predictions = tf.cast( tf.concat(predictions, axis=1), dtype=self.compute_dtype ) predictions_shape = tf.cast( tf.shape(predictions), dtype=self.compute_dtype ) for i in range(len(shapes)): shape_x, shape_y = shapes[i] grid_x, grid_y = tf.meshgrid(tf.range(shape_y), tf.range(shape_x)) grid = tf.reshape(tf.stack((grid_x, grid_y), 2), (1, -1, 2)) shape = grid.shape[:2] grids.append(tf.cast(grid, self.compute_dtype)) strides.append( tf.ones((shape[0], shape[1], 1)) * image_shape[0] / tf.cast(shape_x, self.compute_dtype) ) grids = tf.concat(grids, axis=1) strides = tf.concat(strides, axis=1) box_xy = tf.expand_dims( (predictions[..., :2] + grids) * strides / image_shape, axis=-2 ) box_xy = tf.broadcast_to( box_xy, [batch_size, predictions_shape[1], self.num_classes, 2] ) box_wh = tf.expand_dims( tf.exp(predictions[..., 2:4]) * strides / image_shape, axis=-2 ) box_wh = tf.broadcast_to( box_wh, [batch_size, predictions_shape[1], self.num_classes, 2] ) box_confidence = tf.math.sigmoid(predictions[..., 4:5]) box_class_probs = tf.math.sigmoid(predictions[..., 5:]) # create and broadcast classes for every box before nms box_classes = tf.expand_dims( tf.range(self.num_classes, dtype=self.compute_dtype), axis=-1 ) box_classes = tf.broadcast_to( box_classes, [batch_size, predictions_shape[1], self.num_classes, 1] ) box_scores = tf.expand_dims(box_confidence * box_class_probs, axis=-1) outputs = tf.concat([box_xy, box_wh, box_classes, box_scores], axis=-1) outputs = tf.reshape(outputs, [batch_size, -1, 6]) outputs = { "boxes": outputs[..., :4], "classes": outputs[..., 4], "confidence": outputs[..., 5], } # this conversion is rel_center_xywh to rel_xywh # small workaround because rel_center_xywh isn't supported yet outputs = bounding_box.convert_format( outputs, source="center_xywh", target="xywh", images=images, ) outputs = bounding_box.convert_format( outputs, source="rel_xywh", target=self.suppression_layer.bounding_box_format, images=images, ) # preparing the predictions for TF NMS op class_predictions = tf.cast(outputs["classes"], tf.int32) class_predictions = tf.one_hot(class_predictions, self.num_classes) scores = ( tf.expand_dims(outputs["confidence"], axis=-1) * class_predictions ) return self.suppression_layer(outputs["boxes"], scores)
keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_decoder.py
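A rough sketch of feeding raw YoloX head outputs through the decoder above. The three pyramid levels (strides 8/16/32 on a 640x640 input) and the 80-class setup are illustrative assumptions; the `4 + 1 + num_classes` channel layout is the one documented in the `call()` method itself.

```python
import tensorflow as tf

# Import path taken from the file path recorded above; the public export
# location may differ.
from keras_cv.models.object_detection.yolox.layers.yolox_decoder import (
    YoloXPredictionDecoder,
)

num_classes = 80
decoder = YoloXPredictionDecoder(
    bounding_box_format="xywh",
    num_classes=num_classes,
)

images = tf.zeros((1, 640, 640, 3))
# One raw head output per pyramid level: 4 box terms + 1 objectness + classes.
predictions = [
    tf.random.uniform((1, 80, 80, 5 + num_classes)),  # stride 8
    tf.random.uniform((1, 40, 40, 5 + num_classes)),  # stride 16
    tf.random.uniform((1, 20, 20, 5 + num_classes)),  # stride 32
]

# The decoder builds grids and strides for each level, converts the boxes to
# the requested format, and hands them to the suppression layer
# (MultiClassNonMaxSuppression by default).
decoded = decoder(images, predictions)
```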
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BASNet model preset configurations."""

from keras_cv.models.backbones.resnet_v1 import resnet_v1_backbone_presets

presets_no_weights = {
    "basnet_resnet18": {
        "metadata": {
            "description": "BASNet with a ResNet18 v1 backbone.",
            "params": 98780872,
            "official_name": "BASNet",
            "path": "basnet_resnet18",
        },
        "config": {
            "backbone": resnet_v1_backbone_presets.backbone_presets["resnet18"],
            "num_classes": 1,
            "input_shape": (288, 288, 3),
        },
    },
    "basnet_resnet34": {
        "metadata": {
            "description": "BASNet with a ResNet34 v1 backbone.",
            "params": 108896456,
            "official_name": "BASNet",
            "path": "basnet_resnet34",
        },
        "config": {
            "backbone": resnet_v1_backbone_presets.backbone_presets["resnet34"],
            "num_classes": 1,
            "input_shape": (288, 288, 3),
        },
    },
}

presets_with_weights = {
    # TODO: Add BASNet preset with weights
}

basnet_presets = {**presets_no_weights, **presets_with_weights}
keras-cv/keras_cv/models/segmentation/basnet/basnet_presets.py
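The presets above are plain dictionaries, so they can be inspected directly. The commented-out `from_preset` call is a hypothetical illustration of how a BASNet task model would typically consume them in KerasCV; that constructor is not part of this file.

```python
from keras_cv.models.segmentation.basnet.basnet_presets import basnet_presets

# List the available BASNet configurations and their sizes.
for name, preset in basnet_presets.items():
    meta = preset["metadata"]
    print(f"{name}: {meta['description']} ({meta['params']:,} parameters)")

# Hypothetical, assuming BASNet follows the usual KerasCV `from_preset`
# pattern that reads the "config" entries above:
# model = keras_cv.models.BASNet.from_preset("basnet_resnet18")
```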
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.layers.vit_det_layers import MLP from keras_cv.models.segmentation.segment_anything.sam_transformer import ( TwoWayTransformer, ) @keras_cv_export("keras_cv.models.SAMMaskDecoder", package="keras_cv.models") class SAMMaskDecoder(keras.layers.Layer): """Mask decoder for the Segment Anything Model (SAM). This lightweight module efficiently maps the image embedding and a set of prompt embeddings to an output mask. Before applying the transformer decoder, the layer first inserts into the set of prompt embeddings a learned output token embedding that will be used at the decoder's output. For simplicity, these embeddings (not including the image embedding) are collectively called "tokens". The image embeddings, positional image embeddings, and tokens are passed through a transformer decoder. After running the decoder, the layer upsamples the updated image embedding by 4x with two transposed convolutional layers (now it's downscaled 4x relative to the input image). Then, the tokens attend once more to the image embedding and the updated output token embedding are passed to a small 3-layer MLP that outputs a vector matching the channel dimension of the upscaled image embedding. Finally, a mask is predicted with a spatially point-wise product between the upscaled image embedding and the MLP's output. Args: transformer_dim (int, optional): The number of input features to the transformer decoder. Defaults to `256`. transformer (keras.layers.Layer, optional): A transformer decoder. Defaults to `None`. When `None`, a `keras_cv.models.TwoWayTransformer` layer is used. num_multimask_outputs (int, optional): Number of multimask outputs. The model would generate these many extra masks. The total masks generated by the model are `1 + num_multimask_outputs`. Defaults to `3`. iou_head_depth (int, optional): The depth of the dense net used to predict the IoU confidence score. Defaults to `3`. iou_head_hidden_dim (int, optional): The number of units in the hidden layers used in the dense net to predict the IoU confidence score. Defaults to `256`. activation (str, optional): Activation to use in the mask upscaler network. Defaults to `"gelu"`. 
References: - [Segment Anything paper](https://arxiv.org/abs/2304.02643) - [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything) """ # noqa: E501 def __init__( self, *, transformer_dim=256, transformer=None, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=256, activation="gelu", **kwargs, ): super().__init__(**kwargs) self.transformer_dim = transformer_dim if transformer is None: transformer = TwoWayTransformer() self.transformer = transformer self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.activation = activation self.iou_token = keras.layers.Embedding(1, transformer_dim) self.num_mask_tokens = num_multimask_outputs + 1 self.mask_tokens = keras.layers.Embedding( self.num_mask_tokens, transformer_dim ) self.output_upscaling = keras.models.Sequential( [ keras.layers.Conv2DTranspose( transformer_dim // 4, kernel_size=2, strides=2 ), keras.layers.LayerNormalization(epsilon=1e-6), keras.layers.Activation(activation), keras.layers.Conv2DTranspose( transformer_dim // 8, kernel_size=2, strides=2 ), keras.layers.Activation(activation), ] ) self.output_hypernetworks_mlps = [ MLP(transformer_dim, transformer_dim // 8, 3) for _ in range(self.num_mask_tokens) ] self.iou_prediction_head = MLP( iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth ) def build(self, input_shape=None): self.transformer.build() self.iou_token.build([None]) self.mask_tokens.build([None]) self.output_upscaling.build([None, None, None, self.transformer_dim]) for mlp in self.output_hypernetworks_mlps: mlp.build([None, self.transformer_dim]) self.iou_prediction_head.build([None, self.transformer_dim]) self.built = True def call(self, inputs): image_embeddings = inputs["image_embeddings"] image_pe = inputs["image_pe"] sparse_prompt_embeddings = inputs["sparse_prompt_embeddings"] dense_prompt_embeddings = inputs["dense_prompt_embeddings"] masks, iou_pred = self._predict_masks( image_embeddings=image_embeddings, image_pe=image_pe, sparse_prompt_embeddings=sparse_prompt_embeddings, dense_prompt_embeddings=dense_prompt_embeddings, ) return {"masks": masks, "iou_pred": iou_pred} def _predict_masks( self, image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, ): indices_iou = ops.arange(1, dtype="int32") indices_mask = ops.arange(self.num_mask_tokens, dtype="int32") output_tokens = ops.concatenate( [self.iou_token(indices_iou), self.mask_tokens(indices_mask)], axis=0, ) output_tokens = ops.broadcast_to( output_tokens[None, ...], shape=( ops.shape(sparse_prompt_embeddings)[0], ops.shape(output_tokens)[0], ops.shape(output_tokens)[1], ), ) tokens = ops.concatenate( [output_tokens, sparse_prompt_embeddings], axis=1 ) source = ops.broadcast_to( image_embeddings, shape=( ops.shape(tokens)[0], ops.shape(image_embeddings)[1], ops.shape(image_embeddings)[2], ops.shape(image_embeddings)[3], ), ) source = source + dense_prompt_embeddings positional_source = ops.broadcast_to( image_pe, shape=( ops.shape(tokens)[0], ops.shape(image_embeddings)[1], ops.shape(image_embeddings)[2], ops.shape(image_embeddings)[3], ), ) shape = ops.shape(source) B, H, W, C = shape[0], shape[1], shape[2], shape[3] hidden_state, source = self.transformer( source, positional_source, tokens ) iou_token_out = hidden_state[:, 0, :] mask_tokens_out = hidden_state[:, 1 : (1 + self.num_mask_tokens), :] source = ops.reshape(source, (B, H, W, C)) upscaled_embeddings = self.output_upscaling(source) hyper_in_list = [] for 
i in range(self.num_mask_tokens): hyper_in_list.append( self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) ) hyper_in = ops.stack(hyper_in_list, axis=1) shape = ops.shape(upscaled_embeddings) B, H, W, C = shape[0], shape[1], shape[2], shape[3] upscaled_embeddings = ops.reshape( ops.transpose(upscaled_embeddings, axes=(0, 3, 1, 2)), (B, C, H * W), ) masks = ops.reshape( hyper_in @ upscaled_embeddings, (B, self.num_mask_tokens, H, W) ) iou_pred = self.iou_prediction_head(iou_token_out) return masks, iou_pred def get_config(self): config = super().get_config() config.update( { "transformer_dim": self.transformer_dim, "transformer": keras.saving.serialize_keras_object( self.transformer ), "num_multimask_outputs": self.num_multimask_outputs, "iou_head_depth": self.iou_head_depth, "iou_head_hidden_dim": self.iou_head_hidden_dim, "activation": self.activation, } ) return config @classmethod def from_config(cls, config): config.update( {"transformer": keras.layers.deserialize(config["transformer"])} ) return super().from_config(config)
keras-cv/keras_cv/models/segmentation/segment_anything/sam_mask_decoder.py
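A minimal sketch of calling the mask decoder above on dummy tensors. The input-dict keys and output keys come from the layer's `call()` method; the 64x64 embedding resolution and the use of two sparse prompt tokens are assumptions chosen to resemble a typical SAM setup.

```python
import numpy as np
from keras_cv.models import SAMMaskDecoder

decoder = SAMMaskDecoder()  # transformer_dim=256, num_multimask_outputs=3

batch, size, dim = 1, 64, 256  # assumed embedding resolution and width
outputs = decoder(
    {
        "image_embeddings": np.zeros((batch, size, size, dim), "float32"),
        "image_pe": np.zeros((batch, size, size, dim), "float32"),
        "sparse_prompt_embeddings": np.zeros((batch, 2, dim), "float32"),
        "dense_prompt_embeddings": np.zeros((batch, size, size, dim), "float32"),
    }
)

# 1 + num_multimask_outputs masks at 4x the embedding resolution, plus one
# IoU confidence score per mask.
print(outputs["masks"].shape)     # (1, 4, 256, 256) under these assumptions
print(outputs["iou_pred"].shape)  # (1, 4)
```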
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras implementation of StableDiffusion. Credits: - Original implementation: https://github.com/CompVis/stable-diffusion - Initial TF/Keras port: https://github.com/divamgupta/stable-diffusion-tensorflow The current implementation is a rewrite of the initial TF/Keras port by Divam Gupta. """ import math import numpy as np from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.backend import random from keras_cv.models.stable_diffusion.clip_tokenizer import SimpleTokenizer from keras_cv.models.stable_diffusion.constants import _ALPHAS_CUMPROD from keras_cv.models.stable_diffusion.constants import _UNCONDITIONAL_TOKENS from keras_cv.models.stable_diffusion.decoder import Decoder from keras_cv.models.stable_diffusion.diffusion_model import DiffusionModel from keras_cv.models.stable_diffusion.diffusion_model import DiffusionModelV2 from keras_cv.models.stable_diffusion.image_encoder import ImageEncoder from keras_cv.models.stable_diffusion.text_encoder import TextEncoder from keras_cv.models.stable_diffusion.text_encoder import TextEncoderV2 MAX_PROMPT_LENGTH = 77 class StableDiffusionBase: """Base class for stable diffusion and stable diffusion v2 model.""" def __init__( self, img_height=512, img_width=512, jit_compile=True, ): # UNet requires multiples of 2**7 = 128 img_height = round(img_height / 128) * 128 img_width = round(img_width / 128) * 128 self.img_height = img_height self.img_width = img_width # lazy initialize the component models and the tokenizer self._image_encoder = None self._text_encoder = None self._diffusion_model = None self._decoder = None self._tokenizer = None self.jit_compile = jit_compile def text_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, ): encoded_text = self.encode_text(prompt) return self.generate_image( encoded_text, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, ) def encode_text(self, prompt): """Encodes a prompt into a latent text encoding. The encoding produced by this method should be used as the `encoded_text` parameter of `StableDiffusion.generate_image`. Encoding text separately from generating an image can be used to arbitrarily modify the text encoding prior to image generation, e.g. for walking between two prompts. Args: prompt: a string to encode, must be 77 tokens or shorter. Example: ```python from keras_cv.models import StableDiffusion model = StableDiffusion(img_height=512, img_width=512, jit_compile=True) encoded_text = model.encode_text("Tacos at dawn") img = model.generate_image(encoded_text) ``` """ # Tokenize prompt (i.e. 
starting context) inputs = self.tokenizer.encode(prompt) if len(inputs) > MAX_PROMPT_LENGTH: raise ValueError( f"Prompt is too long (should be <= {MAX_PROMPT_LENGTH} tokens)" ) phrase = inputs + [49407] * (MAX_PROMPT_LENGTH - len(inputs)) phrase = ops.convert_to_tensor([phrase], dtype="int32") context = self.text_encoder.predict_on_batch( {"tokens": phrase, "positions": self._get_pos_ids()} ) return context def generate_image( self, encoded_text, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, diffusion_noise=None, seed=None, ): """Generates an image based on encoded text. The encoding passed to this method should be derived from `StableDiffusion.encode_text`. Args: encoded_text: Tensor of shape (`batch_size`, 77, 768), or a Tensor of shape (77, 768). When the batch axis is omitted, the same encoded text will be used to produce every generated image. batch_size: int, number of images to generate, defaults to 1. negative_prompt: a string containing information to negatively guide the image generation (e.g. by removing or altering certain aspects of the generated image), defaults to None. num_steps: int, number of diffusion steps (controls image quality), defaults to 50. unconditional_guidance_scale: float, controlling how closely the image should adhere to the prompt. Larger values result in more closely adhering to the prompt, but will make the image noisier. Defaults to 7.5. diffusion_noise: Tensor of shape (`batch_size`, img_height // 8, img_width // 8, 4), or a Tensor of shape (img_height // 8, img_width // 8, 4). Optional custom noise to seed the diffusion process. When the batch axis is omitted, the same noise will be used to seed diffusion for every generated image. seed: integer which is used to seed the random generation of diffusion noise, only to be specified if `diffusion_noise` is None. Example: ```python from keras_cv.models import StableDiffusion from keras_core import ops batch_size = 8 model = StableDiffusion(img_height=512, img_width=512, jit_compile=True) e_tacos = model.encode_text("Tacos at dawn") e_watermelons = model.encode_text("Watermelons at dusk") e_interpolated = ops.linspace(e_tacos, e_watermelons, batch_size) images = model.generate_image(e_interpolated, batch_size=batch_size) ``` """ if diffusion_noise is not None and seed is not None: raise ValueError( "`diffusion_noise` and `seed` should not both be passed to " "`generate_image`. `seed` is only used to generate diffusion " "noise when it's not already user-specified." 
) context = self._expand_tensor(encoded_text, batch_size) if negative_prompt is None: unconditional_context = ops.repeat( self._get_unconditional_context(), batch_size, axis=0 ) else: unconditional_context = self.encode_text(negative_prompt) unconditional_context = self._expand_tensor( unconditional_context, batch_size ) if diffusion_noise is not None: diffusion_noise = ops.squeeze(diffusion_noise) if len(ops.shape(diffusion_noise)) == 3: diffusion_noise = ops.repeat( ops.expand_dims(diffusion_noise, axis=0), batch_size, axis=0 ) latent = diffusion_noise else: latent = self._get_initial_diffusion_noise(batch_size, seed) # Iterative reverse diffusion stage num_timesteps = 1000 ratio = (num_timesteps - 1) / (num_steps - 1) timesteps = (np.arange(0, num_steps) * ratio).round().astype(np.int64) alphas, alphas_prev = self._get_initial_alphas(timesteps) progbar = keras.utils.Progbar(len(timesteps)) iteration = 0 for index, timestep in list(enumerate(timesteps))[::-1]: latent_prev = latent # Set aside the previous latent vector t_emb = self._get_timestep_embedding(timestep, batch_size) unconditional_latent = self.diffusion_model.predict_on_batch( { "latent": latent, "timestep_embedding": t_emb, "context": unconditional_context, } ) latent = self.diffusion_model.predict_on_batch( { "latent": latent, "timestep_embedding": t_emb, "context": context, } ) latent = ops.array( unconditional_latent + unconditional_guidance_scale * (latent - unconditional_latent) ) a_t, a_prev = alphas[index], alphas_prev[index] # Keras backend array need to cast explicitly target_dtype = latent_prev.dtype latent = ops.cast(latent, target_dtype) pred_x0 = (latent_prev - math.sqrt(1 - a_t) * latent) / math.sqrt( a_t ) latent = ( ops.array(latent) * math.sqrt(1.0 - a_prev) + math.sqrt(a_prev) * pred_x0 ) iteration += 1 progbar.update(iteration) # Decoding stage decoded = self.decoder.predict_on_batch(latent) decoded = ((decoded + 1) / 2) * 255 return np.clip(decoded, 0, 255).astype("uint8") def _get_unconditional_context(self): unconditional_tokens = ops.convert_to_tensor( [_UNCONDITIONAL_TOKENS], dtype="int32", ) unconditional_context = self.text_encoder.predict_on_batch( {"tokens": unconditional_tokens, "positions": self._get_pos_ids()} ) return unconditional_context def _expand_tensor(self, text_embedding, batch_size): """Extends a tensor by repeating it to fit the shape of the given batch size.""" text_embedding = ops.squeeze(text_embedding) if len(text_embedding.shape) == 2: text_embedding = ops.repeat( ops.expand_dims(text_embedding, axis=0), batch_size, axis=0 ) return text_embedding @property def image_encoder(self): """image_encoder returns the VAE Encoder with pretrained weights. Usage: ```python sd = keras_cv.models.StableDiffusion() my_image = np.ones((512, 512, 3)) latent_representation = sd.image_encoder.predict(my_image) ``` """ if self._image_encoder is None: self._image_encoder = ImageEncoder() if self.jit_compile: self._image_encoder.compile(jit_compile=True) return self._image_encoder @property def text_encoder(self): pass @property def diffusion_model(self): pass @property def decoder(self): """decoder returns the diffusion image decoder model with pretrained weights. Can be overriden for tasks where the decoder needs to be modified. """ if self._decoder is None: self._decoder = Decoder(self.img_height, self.img_width) if self.jit_compile: self._decoder.compile(jit_compile=True) return self._decoder @property def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. 
Can be overriden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None: self._tokenizer = SimpleTokenizer() return self._tokenizer def _get_timestep_embedding( self, timestep, batch_size, dim=320, max_period=10000 ): half = dim // 2 range = ops.cast(ops.arange(0, half), "float32") freqs = ops.exp(-math.log(max_period) * range / half) args = ops.convert_to_tensor([timestep], dtype="float32") * freqs embedding = ops.concatenate([ops.cos(args), ops.sin(args)], 0) embedding = ops.reshape(embedding, [1, -1]) return ops.repeat(embedding, batch_size, axis=0) def _get_initial_alphas(self, timesteps): alphas = [_ALPHAS_CUMPROD[t] for t in timesteps] alphas_prev = [1.0] + alphas[:-1] return alphas, alphas_prev def _get_initial_diffusion_noise(self, batch_size, seed): return random.normal( (batch_size, self.img_height // 8, self.img_width // 8, 4), seed=seed, ) @staticmethod def _get_pos_ids(): return ops.expand_dims(ops.arange(MAX_PROMPT_LENGTH, dtype="int32"), 0) @keras_cv_export("keras_cv.models.StableDiffusion") class StableDiffusion(StableDiffusionBase): """Keras implementation of Stable Diffusion. Note that the StableDiffusion API, as well as the APIs of the sub-components of StableDiffusion (e.g. ImageEncoder, DiffusionModel) should be considered unstable at this point. We do not guarantee backwards compatability for future changes to these APIs. Stable Diffusion is a powerful image generation model that can be used, among other things, to generate pictures according to a short text description (called a "prompt"). Arguments: img_height: int, height of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 512. img_width: int, width of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 512. jit_compile: bool, whether to compile the underlying models to XLA. This can lead to a significant speedup on some systems. Defaults to False. Example: ```python from keras_cv.models import StableDiffusion from PIL import Image model = StableDiffusion(img_height=512, img_width=512, jit_compile=True) img = model.text_to_image( prompt="A beautiful horse running through a field", batch_size=1, # How many images to generate at once num_steps=25, # Number of iterations (controls image quality) seed=123, # Set this to always get the same image from the same prompt ) Image.fromarray(img[0]).save("horse.png") print("saved at horse.png") ``` References: - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement) - [Original implementation](https://github.com/CompVis/stable-diffusion) """ # noqa: E501 def __init__( self, img_height=512, img_width=512, jit_compile=True, ): super().__init__(img_height, img_width, jit_compile) print( "By using this model checkpoint, you acknowledge that its usage is " "subject to the terms of the CreativeML Open RAIL-M license at " "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE" # noqa: E501 ) @property def text_encoder(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. 
""" if self._text_encoder is None: self._text_encoder = TextEncoder(MAX_PROMPT_LENGTH) if self.jit_compile: self._text_encoder.compile(jit_compile=True) return self._text_encoder @property def diffusion_model(self): """diffusion_model returns the diffusion model with pretrained weights. Can be overriden for tasks where the diffusion model needs to be modified. """ if self._diffusion_model is None: self._diffusion_model = DiffusionModel( self.img_height, self.img_width, MAX_PROMPT_LENGTH ) if self.jit_compile: self._diffusion_model.compile(jit_compile=True) return self._diffusion_model @keras_cv_export("keras_cv.models.StableDiffusionV2") class StableDiffusionV2(StableDiffusionBase): """Keras implementation of Stable Diffusion v2. Note that the StableDiffusion API, as well as the APIs of the sub-components of StableDiffusionV2 (e.g. ImageEncoder, DiffusionModelV2) should be considered unstable at this point. We do not guarantee backwards compatability for future changes to these APIs. Stable Diffusion is a powerful image generation model that can be used, among other things, to generate pictures according to a short text description (called a "prompt"). Arguments: img_height: int, height of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 512. img_width: int, width of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 512. jit_compile: bool, whether to compile the underlying models to XLA. This can lead to a significant speedup on some systems. Defaults to False. Example: ```python from keras_cv.models import StableDiffusionV2 from PIL import Image model = StableDiffusionV2(img_height=512, img_width=512, jit_compile=True) img = model.text_to_image( prompt="A beautiful horse running through a field", batch_size=1, # How many images to generate at once num_steps=25, # Number of iterations (controls image quality) seed=123, # Set this to always get the same image from the same prompt ) Image.fromarray(img[0]).save("horse.png") print("saved at horse.png") ``` References: - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement) - [Original implementation](https://github.com/Stability-AI/stablediffusion) """ # noqa: E501 def __init__( self, img_height=512, img_width=512, jit_compile=True, ): super().__init__(img_height, img_width, jit_compile) print( "By using this model checkpoint, you acknowledge that its usage is " "subject to the terms of the CreativeML Open RAIL++-M license at " "https://github.com/Stability-AI/stablediffusion/blob/main/LICENSE-MODEL" # noqa: E501 ) @property def text_encoder(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder is None: self._text_encoder = TextEncoderV2(MAX_PROMPT_LENGTH) if self.jit_compile: self._text_encoder.compile(jit_compile=True) return self._text_encoder @property def diffusion_model(self): """diffusion_model returns the diffusion model with pretrained weights. Can be overriden for tasks where the diffusion model needs to be modified. """ if self._diffusion_model is None: self._diffusion_model = DiffusionModelV2( self.img_height, self.img_width, MAX_PROMPT_LENGTH ) if self.jit_compile: self._diffusion_model.compile(jit_compile=True) return self._diffusion_model
keras-cv/keras_cv/models/stable_diffusion/stable_diffusion.py
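The docstrings above cover prompt encoding and interpolation; the sketch below simply combines a negative prompt with a fixed seed so a generation can be reproduced. The prompt strings are arbitrary.

```python
from keras_cv.models import StableDiffusion

model = StableDiffusion(img_height=512, img_width=512, jit_compile=True)

# `negative_prompt` is encoded and used in place of the default unconditional
# context, steering the sampler away from the listed attributes; `seed` fixes
# the initial diffusion noise so the result is repeatable.
images = model.text_to_image(
    prompt="A watercolor painting of a lighthouse at sunset",
    negative_prompt="blurry, low contrast",
    batch_size=1,
    num_steps=25,
    unconditional_guidance_scale=7.5,
    seed=123,
)
print(images.shape, images.dtype)  # (1, 512, 512, 3) uint8
```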
<jupyter_start><jupyter_text>Setup<jupyter_code>!pip install -q git+https://github.com/divyashreepathihalli/keras-cv.git@CLIP_refactor !pip install -q keras-nlp !pip install -q tf-keras !pip install -q tensorflow-text !pip install keras==3.0.2<jupyter_output>Installing build dependencies ... [?25l[?25hdone Getting requirements to build wheel ... [?25l[?25hdone Preparing metadata (pyproject.toml) ... [?25l[?25hdone  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 950.8/950.8 kB 5.9 MB/s eta 0:00:00 [?25h Building wheel for keras-cv (pyproject.toml) ... [?25l[?25hdone  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 415.4/415.4 kB 5.4 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.2/5.2 MB 17.0 MB/s eta 0:00:00 [?25hCollecting keras==3.0.2 Downloading keras-3.0.2-py3-none-any.whl (1.0 MB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.0/1.0 MB 8.4 MB/s eta 0:00:00 [?25hRequirement already satisfied: absl-py in /usr/local/lib/python3.10/dist-packages (from keras==3.0.2) (1.4.0) Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packa[...]<jupyter_text>Import<jupyter_code>from keras_cv.models.feature_extractor.clip import CLIPProcessor import keras from keras_cv.models import CLIP !wget https://i.imgur.com/8H7XCH0.jpg -O cat.jpg !wget http://images.cocodataset.org/val2017/000000039769.jpg -O test.jpg # @title Select which model weights you would like to convert MODEL_CONFIGS = { "CLIP_B32": { "embed_dim": 512, "context_length": 77, "vocab_size": 49408, "transformer_width": 512, "transformer_heads": 8, "transformer_layers": 12, "vision_layers": 12, "vision_width": 768, "image_resolution": 224, "vision_patch_size": 32, }, "CLIP_B16": { "embed_dim": 512, "context_length": 77, "vocab_size": 49408, "transformer_width": 512, "transformer_heads": 8, "transformer_layers": 12, "vision_layers": 12, "vision_width": 768, "image_resolution": 224, "vision_patch_size": 16, }, "CLIP_L14": { "embed_dim": 768, "context_length": 77, "vocab_size": 49408, "transformer_width": 768, "transformer_heads": 12, "transformer_layers": 12, "vision_layers": 24, "vision_width": 1024, "image_resolution": 224, "vision_patch_size": 14, }, "CLIP_L14_336": { "embed_dim": 768, "context_length": 77, "vocab_size": 49408, "transformer_width": 768, "transformer_heads": 12, "transformer_layers": 12, "vision_layers": 24, "vision_width": 1024, "image_resolution": 336, "vision_patch_size": 14, }, } model_map_hf = { "CLIP_B16": "openai/clip-vit-base-patch32", "CLIP_B32": "openai/clip-vit-base-patch16", "CLIP_L14": "openai/clip-vit-large-patch14", "CLIP_L14_336": "openai/clip-vit-large-patch14-336", } config_name = "CLIP_L14_336" # @param ["CLIP_B16", "CLIP_B32", "CLIP_L14", "CLIP_L14_336"] config_name_hf = model_map_hf[config_name]<jupyter_output><empty_output><jupyter_text>Keras 3 CLIP<jupyter_code>embed_dim = MODEL_CONFIGS[config_name]["embed_dim"] context_length = MODEL_CONFIGS[config_name]["context_length"] vocab_size = MODEL_CONFIGS[config_name]["vocab_size"] transformer_width = MODEL_CONFIGS[config_name]["transformer_width"] transformer_heads = MODEL_CONFIGS[config_name]["transformer_heads"] transformer_layers = MODEL_CONFIGS[config_name]["transformer_layers"] vision_layers = MODEL_CONFIGS[config_name]["vision_layers"] vision_width = MODEL_CONFIGS[config_name]["vision_width"] vision_patch_size = MODEL_CONFIGS[config_name]["vision_patch_size"] image_resolution = MODEL_CONFIGS[config_name]["image_resolution"] model = CLIP( embed_dim, image_resolution, vision_layers, vision_width, vision_patch_size, context_length, 
vocab_size, transformer_width, transformer_heads, transformer_layers, ) model.summary() processor = CLIPProcessor(224, "vocab.json", "merges.txt") image = processor.process_images(["cat.jpg"]) text_input = [ "photo of a cat on a tortoise", "tortoise on a dog", "a photo of a tortoise", ] text = processor.process_texts(text_input) image_logits, text_logits = model(image, text) output = keras.layers.Softmax()(image_logits) print(image_logits) print(text_input[keras.ops.argmax(output)]) model.summary()<jupyter_output><empty_output><jupyter_text>HF CLIP<jupyter_code>from PIL import Image import requests from transformers import CLIPProcessor as CP from transformers import CLIPModel as CM model_hf = CM.from_pretrained(config_name_hf) processor = CP.from_pretrained(config_name_hf) url = "https://i.imgur.com/8H7XCH0.jpg" image_hf = Image.open(requests.get(url, stream=True).raw) text_inputs = [ "photo of a cat on a tortoise", "tortoise on a dog", "a photo of a tortoise", ] inputs = processor( text=text_inputs, images=image_hf, return_tensors="pt", padding=True ) outputs = model_hf(**inputs) logits_per_image = ( outputs.logits_per_image ) # this is the image-text similarity score probs = logits_per_image.softmax( dim=1 ) # we can take the softmax to get the label probabilitiesprobs probs # hugging face weights hf_wts = model_hf.state_dict()<jupyter_output><empty_output><jupyter_text>Copy weights vision encoder<jupyter_code>model.logit_scale.assign(hf_wts.pop("logit_scale").numpy()) model.get_layer("image_encoder").get_layer( "clip_patching_and_embedding" ).class_embedding.assign( hf_wts.pop("vision_model.embeddings.class_embedding").numpy() ) model.get_layer("image_encoder").get_layer( "clip_patching_and_embedding" ).positional_embedding.assign( hf_wts.pop("vision_model.embeddings.position_embedding.weight").numpy() ) model.get_layer("image_encoder").get_layer( "clip_patching_and_embedding" ).conv1.weights[0].assign( hf_wts.pop("vision_model.embeddings.patch_embedding.weight") .permute(3, 2, 1, 0) .numpy() ) model.get_layer("image_encoder").get_layer("ln_1").weights[0].assign( hf_wts.pop("vision_model.pre_layrnorm.weight").numpy() ) model.get_layer("image_encoder").get_layer("ln_1").weights[1].assign( hf_wts.pop("vision_model.pre_layrnorm.bias").numpy() ) model.get_layer("image_encoder").get_layer("ln_2").weights[0].assign( hf_wts.pop("vision_model.post_layernorm.weight").numpy() ) model.get_layer("image_encoder").get_layer("ln_2").weights[1].assign( hf_wts.pop("vision_model.post_layernorm.bias").numpy() ) model.get_layer("image_encoder").get_layer("vision_projector").weights[ 0 ].assign(hf_wts.pop("visual_projection.weight").transpose(1, 0).numpy()) for i in range(0, MODEL_CONFIGS[config_name]["vision_layers"]): if i == 0: residual_attention = f"residual_attention" else: residual_attention = f"residual_attention_{i}" model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).attn.q_proj.weights[0].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.q_proj.weight") ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).attn.q_proj.weights[1].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.q_proj.bias") ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).attn.k_proj.weights[0].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.k_proj.weight") ) model.get_layer("image_encoder").get_layer( "clip_encoder" 
).resblocks.get_layer(residual_attention).attn.k_proj.weights[1].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.k_proj.bias") ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).attn.v_proj.weights[0].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.v_proj.weight") ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).attn.v_proj.weights[1].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.v_proj.bias") ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).attn.out_proj.weights[1].assign( hf_wts.pop( f"vision_model.encoder.layers.{i}.self_attn.out_proj.bias" ).numpy() ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).attn.out_proj.weights[0].assign( hf_wts.pop( f"vision_model.encoder.layers.{i}.self_attn.out_proj.weight" ).numpy() ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).ln_1.weights[0].assign( hf_wts.pop( f"vision_model.encoder.layers.{i}.layer_norm1.weight" ).numpy() ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).ln_1.weights[1].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.layer_norm1.bias").numpy() ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).ln_2.weights[0].assign( hf_wts.pop( f"vision_model.encoder.layers.{i}.layer_norm2.weight" ).numpy() ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).ln_2.weights[1].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.layer_norm2.bias").numpy() ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).mlp.get_layer("c_fc").weights[ 0 ].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.mlp.fc1.weight") .transpose(1, 0) .numpy() ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).mlp.get_layer("c_fc").weights[ 1 ].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.mlp.fc1.bias").numpy() ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).mlp.get_layer("c_proj").weights[ 0 ].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.mlp.fc2.weight") .transpose(1, 0) .numpy() ) model.get_layer("image_encoder").get_layer( "clip_encoder" ).resblocks.get_layer(residual_attention).mlp.get_layer("c_proj").weights[ 1 ].assign( hf_wts.pop(f"vision_model.encoder.layers.{i}.mlp.fc2.bias").numpy() )<jupyter_output><empty_output><jupyter_text>Text encoder<jupyter_code>num_transformer_layers = MODEL_CONFIGS[config_name]["vision_layers"] model.get_layer("text_encoder").get_layer("text_projector").weights[0].assign( hf_wts.pop("text_projection.weight").numpy() ) model.get_layer("text_encoder").get_layer("token_embedding").weights[0].assign( hf_wts.pop("text_model.embeddings.token_embedding.weight").numpy() ) model.get_layer("text_encoder").positional_embedding.assign( hf_wts.pop("text_model.embeddings.position_embedding.weight").numpy() ) model.get_layer("text_encoder").get_layer("ln_final").weights[0].assign( hf_wts.pop("text_model.final_layer_norm.weight") ) model.get_layer("text_encoder").get_layer("ln_final").weights[1].assign( hf_wts.pop("text_model.final_layer_norm.bias") ) for i in range(MODEL_CONFIGS[config_name]["transformer_layers"]): 
model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).attn.k_proj.weights[ 0 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.k_proj.weight") ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).attn.k_proj.weights[ 1 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.k_proj.bias") ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).attn.q_proj.weights[ 0 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.q_proj.weight") ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).attn.q_proj.weights[ 1 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.q_proj.bias") ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).attn.v_proj.weights[ 0 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.v_proj.weight") ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).attn.v_proj.weights[ 1 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.v_proj.bias") ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).attn.out_proj.weights[ 0 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.out_proj.weight") ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).attn.out_proj.weights[ 1 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.out_proj.bias") ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).ln_1.weights[ 0 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.layer_norm1.weight").numpy() ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).ln_1.weights[ 1 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.layer_norm1.bias").numpy() ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).ln_2.weights[ 0 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.layer_norm2.weight").numpy() ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).ln_2.weights[ 1 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.layer_norm2.bias").numpy() ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).mlp.get_layer( "c_fc" ).weights[ 0 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.mlp.fc1.weight") .transpose(1, 0) .numpy() ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).mlp.get_layer( "c_fc" ).weights[ 1 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.mlp.fc1.bias").numpy() ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).mlp.get_layer( "c_proj" ).weights[ 0 ].assign( 
hf_wts.pop(f"text_model.encoder.layers.{i}.mlp.fc2.weight") .transpose(1, 0) .numpy() ) model.get_layer("text_encoder").get_layer( "clip_encoder" ).resblocks.get_layer( f"residual_attention_{num_transformer_layers+i}" ).mlp.get_layer( "c_proj" ).weights[ 1 ].assign( hf_wts.pop(f"text_model.encoder.layers.{i}.mlp.fc2.bias").numpy() ) # verify that we copied all weights hf_wts.keys()<jupyter_output><empty_output><jupyter_text>save weights<jupyter_code>model.save_weights("clip-vit-base-patch32.weights.h5")<jupyter_output><empty_output>
keras-cv/keras_cv/tools/checkpoint_conversion/clip_weights_conversion.ipynb/0
{ "file_path": "keras-cv/keras_cv/tools/checkpoint_conversion/clip_weights_conversion.ipynb", "repo_id": "keras-cv", "token_count": 8283 }
28
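The notebook above repeats one pattern many times: pop a tensor out of the Hugging Face `state_dict`, fix up its layout, and `assign` it onto the matching Keras variable. Below is a minimal sketch of that pattern as a reusable helper; `copy_weight`, the example keys, and the permute dimensions are illustrative and not part of the notebook, and it assumes `hf_wts` is a PyTorch `state_dict` as used above.

```python
# Hypothetical helper distilling the repetitive copy pattern from the notebook above.
def copy_weight(keras_variable, hf_state_dict, hf_key, permute_dims=None):
    """Pop one tensor from a PyTorch state_dict and assign it to a Keras variable."""
    tensor = hf_state_dict.pop(hf_key)  # pop, so leftover keys reveal missed weights
    if permute_dims is not None:
        tensor = tensor.permute(*permute_dims)  # torch layout -> Keras layout
    keras_variable.assign(tensor.numpy())


# Example call mirroring the patch-embedding copy above (conv kernels use a
# (3, 2, 1, 0) permute, dense kernels typically a (1, 0) transpose):
# copy_weight(
#     model.get_layer("image_encoder")
#     .get_layer("clip_patching_and_embedding")
#     .conv1.weights[0],
#     hf_wts,
#     "vision_model.embeddings.patch_embedding.weight",
#     permute_dims=(3, 2, 1, 0),
# )
```

The final `hf_wts.keys()` check in the notebook works precisely because every copy pops its key from the state dict.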
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import numpy as np import pytest from absl.testing import parameterized from keras_cv.models import DeepLabV3Plus from keras_cv.models import ImageClassifier from keras_cv.models import RetinaNet from keras_cv.models import YOLOV8Detector from keras_cv.tests.test_case import TestCase from keras_cv.utils import preset_utils class PresetUtilsTest(TestCase): @parameterized.parameters( (ImageClassifier, "resnet50_v2_imagenet_classifier", "classification"), ( ImageClassifier, "efficientnetv2_s_imagenet_classifier", "classification", ), ( ImageClassifier, "mobilenet_v3_large_imagenet_classifier", "classification", ), (YOLOV8Detector, "yolo_v8_m_pascalvoc", "detection"), (RetinaNet, "retinanet_resnet50_pascalvoc", "detection"), (DeepLabV3Plus, "deeplab_v3_plus_resnet50_pascalvoc", "segmentation"), ) @pytest.mark.large def test_preset_saving(self, cls, preset_name, task_type): save_dir = self.get_temp_dir() if task_type == "detection": model = cls.from_preset(preset_name, bounding_box_format="xywh") else: model = cls.from_preset(preset_name) preset_utils.save_to_preset(model, save_dir) # Check existence of files self.assertTrue(os.path.exists(os.path.join(save_dir, "config.json"))) self.assertTrue( os.path.exists(os.path.join(save_dir, "model.weights.h5")) ) self.assertTrue(os.path.exists(os.path.join(save_dir, "metadata.json"))) # Check the model config (`config.json`) with open(os.path.join(save_dir, "config.json"), "r") as f: config_json = f.read() self.assertTrue( "build_config" not in config_json ) # Test on raw json to include nested keys self.assertTrue( "compile_config" not in config_json ) # Test on raw json to include nested keys config = json.loads(config_json) self.assertEqual(config["weights"], "model.weights.h5") # Try loading the model from preset directory restored_model = preset_utils.load_from_preset(save_dir) input_batch = np.ones(shape=(2, 224, 224, 3)) expected_output = model(input_batch) restored_output = restored_model(input_batch) self.assertAllClose(expected_output, restored_output) def test_preset_errors(self): with self.assertRaisesRegex(ValueError, "must be a string"): ImageClassifier.from_preset(ImageClassifier) with self.assertRaisesRegex(ValueError, "Unknown preset identifier"): ImageClassifier.from_preset("taggle://rednet/rednet/rednet")
keras-cv/keras_cv/utils/preset_utils_test.py/0
{ "file_path": "keras-cv/keras_cv/utils/preset_utils_test.py", "repo_id": "keras-cv", "token_count": 1377 }
29
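As a companion to the test above, here is a hedged sketch of the save/load round trip it exercises, using the same `from_preset`, `save_to_preset`, and `load_from_preset` calls; the preset name and target directory are only examples.

```python
import numpy as np

from keras_cv.models import ImageClassifier
from keras_cv.utils import preset_utils

# Load a pretrained classifier, save it as a local preset, then restore it.
model = ImageClassifier.from_preset("resnet50_v2_imagenet_classifier")
preset_utils.save_to_preset(model, "/tmp/my_preset")  # writes config.json, model.weights.h5, metadata.json
restored = preset_utils.load_from_preset("/tmp/my_preset")

# The restored model should reproduce the original model's outputs.
batch = np.ones((2, 224, 224, 3))
print(np.allclose(model(batch), restored(batch), atol=1e-5))
```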
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script to create (and optionally install) a `.whl` archive for KerasCV. Usage: 1. Create a `.whl` file in `dist/`: ``` python3 pip_build.py ``` 2. Also install the new package immediately after: ``` python3 pip_build.py --install ``` """ import argparse import datetime import glob import os import pathlib import shutil import namex package = "keras_cv" build_directory = "tmp_build_dir" dist_directory = "dist" to_copy = [ "setup.py", "setup.cfg", "README.md", ] def ignore_files(_, filenames): return [f for f in filenames if "_test" in f] def export_version_string(version, is_nightly=False): """Export Version and Package Name.""" if is_nightly: date = datetime.datetime.now() version += f".dev{date.strftime('%Y%m%d%H')}" # Replaces `name="keras-cv"` in `setup.py` with `keras-cv-nightly` with open("setup.py") as f: setup_contents = f.read() with open("setup.py", "w") as f: setup_contents = setup_contents.replace( 'name="keras-cv"', 'name="keras-cv-nightly"' ) f.write(setup_contents) # Overwrite the version string with our package version. with open(os.path.join(package, "src", "version_utils.py")) as f: version_contents = f.readlines() with open(os.path.join(package, "src", "version_utils.py"), "w") as f: for line in version_contents: if line.startswith("__version__"): f.write(f'__version__ = "{version}"\n') else: f.write(line) # Make sure to export the __version__ string with open(os.path.join(package, "__init__.py")) as f: init_contents = f.read() with open(os.path.join(package, "__init__.py"), "w") as f: f.write(init_contents) f.write("from keras_cv.src.version_utils import __version__\n") def copy_source_to_build_directory(root_path): # Copy sources (`keras_cv/` directory and setup files) to build # directory os.chdir(root_path) os.mkdir(build_directory) shutil.copytree( package, os.path.join(build_directory, package), ignore=ignore_files ) for fname in to_copy: shutil.copy(fname, os.path.join(f"{build_directory}", fname)) os.chdir(build_directory) def run_namex_conversion(): # Restructure the codebase so that source files live in `keras_cv/src` namex.convert_codebase(package, code_directory="src") # Generate API __init__.py files in `keras_cv/` namex.generate_api_files(package, code_directory="src", verbose=True) def build_and_save_output(root_path, __version__): """Build the package.""" os.system("python3 -m build") # Save the dist files generated by the build process os.chdir(root_path) if not os.path.exists(dist_directory): os.mkdir(dist_directory) for fpath in glob.glob( os.path.join(build_directory, dist_directory, "*.*") ): shutil.copy(fpath, dist_directory) # Find the .whl file path whl_path = None for fname in os.listdir(dist_directory): if __version__ in fname and fname.endswith(".whl"): whl_path = os.path.abspath(os.path.join(dist_directory, fname)) if whl_path: print(f"Build successful. 
Wheel file available at {whl_path}") else: print("Build failed.") return whl_path def build(root_path, is_nightly=False): if os.path.exists(build_directory): raise ValueError(f"Directory already exists: {build_directory}") try: copy_source_to_build_directory(root_path) run_namex_conversion() # Make sure to export the __version__ string from keras_cv.src import __version__ # noqa: E402 export_version_string(__version__, is_nightly) return build_and_save_output(root_path, __version__) finally: # Clean up: remove the build directory (no longer needed) shutil.rmtree(build_directory) def install_whl(whl_fpath): print("Installing wheel file.") os.system(f"pip3 install {whl_fpath} --force-reinstall --no-dependencies") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--install", action="store_true", help="Whether to install the generated wheel file.", ) parser.add_argument( "--nightly", action="store_true", help="Whether to generate nightly wheel file.", ) args = parser.parse_args() root_path = pathlib.Path(__file__).parent.resolve() whl_path = build(root_path, args.nightly) if whl_path and args.install: install_whl(whl_path)
keras-cv/pip_build.py/0
{ "file_path": "keras-cv/pip_build.py", "repo_id": "keras-cv", "token_count": 2112 }
30
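One detail of `export_version_string` above that is easy to miss is how the nightly version suffix is formed. The snippet below is a standalone illustration; the base version and timestamp are made up.

```python
import datetime

version = "0.8.0"  # hypothetical base version read from version_utils.py
date = datetime.datetime(2023, 11, 5, 14)  # fixed timestamp for a reproducible example
nightly_version = version + f".dev{date.strftime('%Y%m%d%H')}"
print(nightly_version)  # 0.8.0.dev2023110514
```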
import json import os import sys import tensorboard as tb from absl import flags flags.DEFINE_string( "model_name", None, "The name of the KerasCV.model that was trained" ) flags.DEFINE_string( "tensorboard_logs_path", None, "Path to tensorboard logs to load" ) flags.DEFINE_string("training_script_path", None, "Path to the training script") flags.DEFINE_string( "script_version", None, "commit hash of the latest commit in KerasCV/master " "for the training script", ) flags.DEFINE_string( "weights_version", None, "The version of the training script used to produce the latest weights. " "For example, v0", ) flags.DEFINE_string( "contributor", None, "The GitHub username of the contributor of these results", ) flags.DEFINE_string( "accelerators", None, "The number of accelerators used for training." ) FLAGS = flags.FLAGS FLAGS(sys.argv) model_name = FLAGS.model_name or input( "Input the name of the KerasCV.model that was trained\n" ) weights_version = FLAGS.weights_version or input( "Input the weights version for your script\n" ) training_script_path = FLAGS.training_script_path or input( "Input the path to your training script\n" ) full_training_script_path = os.path.abspath(training_script_path) # Build an experiment name. # This will be structured as task/training_script_name/model_name-version training_script_rooted_at_training = full_training_script_path[ full_training_script_path.index("keras-cv/examples/training/") + 27 : ] training_script_dirs = training_script_rooted_at_training.split("/") tensorboard_experiment_name = f"{training_script_dirs[0]}/{'/'.join(training_script_dirs[1:])[:-3]}/{model_name}-{weights_version}" # noqa: E501 training_script_json_path = full_training_script_path[ : full_training_script_path.index("keras-cv/examples/training/") + 27 ] + "/".join(training_script_dirs[:2] + ["training_history.json"]) script_version = FLAGS.script_version or input( "Input the commit hash of the latest commit in KerasCV/master " "for the training script used for training." ) tensorboard_logs_path = FLAGS.tensorboard_logs_path or input( "Input the path to the TensorBoard logs\n" ) tensorboard_experiment_id = ( os.popen( f"python3 -m tensorboard.main dev upload " f"--logdir {tensorboard_logs_path} " f"--name {tensorboard_experiment_name} " f"--one_shot --verbose 0" ) .read() .split("/")[-2] ) tensorboard_experiment = tb.data.experimental.ExperimentFromDev( tensorboard_experiment_id ) tensorboard_results = tensorboard_experiment.get_scalars() training_epochs = max( tensorboard_results[tensorboard_results.run == "train"].step ) results_tags = tensorboard_results.tag.unique() # Validation accuracy won't exist in all logs (e.g for OD tasks). # We capture the max validation accuracy if it exists, but otherwise omit it. max_validation_accuracy = None if ( "epoch_categorical_accuracy" in results_tags or "epoch_sparse_categorical_accuracy" in results_tags ): max_validation_accuracy = max( tensorboard_results[ (tensorboard_results.run == "validation") & ( (tensorboard_results.tag == "epoch_categorical_accuracy") | ( tensorboard_results.tag == "epoch_sparse_categorical_accuracy" ) ) ].value ) max_validation_accuracy = f"{max_validation_accuracy:.4f}" # Mean IOU won't exist in all logs (e.g for classification tasks). # We capture the max IOU if it exists, but otherwise omit it. 
max_mean_iou = None if "epoch_mean_io_u" in results_tags: max_mean_iou = max( tensorboard_results[ (tensorboard_results.run == "validation") & (tensorboard_results.tag == "epoch_mean_io_u") ].value ) max_mean_iou = f"{max_mean_iou:.4f}" contributor = FLAGS.contributor or input( "Input your GitHub username " "(or the username of the contributor, if it's not you)\n" ) accelerators = FLAGS.accelerators or input( "Input the number of accelerators used during training.\n" ) args = input( "Input any training arguments used for the training script.\n" "Use comma-separate, colon-split key-value pairs. For example:\n" "arg1:value, arg2:value\n" ) args_dict = {} for arg in args.split(","): if len(arg.strip()) == 0: continue key_value_pair = [s.strip() for s in arg.split(":")] args_dict[key_value_pair[0]] = key_value_pair[1] new_results = { "script": { "name": "/".join(training_script_dirs[2:]), "version": script_version, }, "epochs_trained": training_epochs, "tensorboard_logs": f"https://tensorboard.dev/experiment/{tensorboard_experiment_id}/", # noqa: E501 "contributor": contributor, "args": args_dict, "accelerators": int(accelerators), } if max_validation_accuracy is not None: new_results["validation_accuracy"] = max_validation_accuracy if max_mean_iou is not None: new_results["validation_mean_iou"] = max_mean_iou # Check if the JSON file already exists results_file = open(training_script_json_path, "r") results_string = results_file.read() results = json.loads(results_string) if results_string != "" else {} results_file.close() # If we've never run this script on this model, insert a record for it if model_name not in results: results[model_name] = {} # Add this run's results to the model's record model_results = results[model_name] model_results[weights_version] = new_results # Save the updated results results_file = open(training_script_json_path, "w") json.dump(results, results_file, indent=4, sort_keys=True) results_file.close()
keras-cv/shell/weights/update_training_history.py/0
{ "file_path": "keras-cv/shell/weights/update_training_history.py", "repo_id": "keras-cv", "token_count": 2223 }
31
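The free-form `arg1:value, arg2:value` prompt in the script above is parsed with a small split-and-strip loop; here is a self-contained illustration of that parsing with a made-up input string.

```python
args = "batch_size:64, use_ema:true"  # hypothetical answer typed at the prompt

args_dict = {}
for arg in args.split(","):
    if len(arg.strip()) == 0:
        continue
    key_value_pair = [s.strip() for s in arg.split(":")]
    args_dict[key_value_pair[0]] = key_value_pair[1]

print(args_dict)  # {'batch_size': '64', 'use_ema': 'true'}
```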
Files: keras_cv/* Copyright © 2023 The KerasCV Authors All code in this repository excluding the code located in keras_cv/layers/preprocessing_3d/waymo is licensed under the Apache License, Version 2.0. The code appearing in the keras_cv/layers/preprocessing_3d/waymo folder is licensed under terms appearing below. Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2023 The KerasCV Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --- Files: keras_cv/layers/preprocessing_3d/waymo/* Copyright (c) 2023 Waymo LLC. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. Additional IP Rights Grant (Patents) "Works" means the code located at keras_cv/layers/preprocessing_3d/waymo licensed from Waymo LLC ("Waymo") for inclusion in the KerasCV project at github.com/keras-team/keras-cv. “Patents" means the pending U.S. Patent App. No. 63/418,259 and any issued patents arising therefrom. Subject to the terms and conditions of this license, Waymo hereby grants to you a limited worldwide, non-exclusive, royalty-free, personal patent license to make, have made, use, and import the Works, where such license applies only to those Patent claims that are necessarily infringed by the Works executing the ”preprocessing_3d” augmentation library on 3D perception tasks using the “lidaraugment_keraspolicy.py” file. This grant does not include claims that would be infringed by combining the Works with other works, utilizing the Works on other tasks, or as a consequence of further modification of the Works. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation or any other patent enforcement activity against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Works or any activity using the Works to execute functions for 3D perception tasks constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this license for the Works shall terminate as of the date such litigation is filed. DISCLAIMER THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
keras-cv/LICENSE/0
{ "file_path": "keras-cv/LICENSE", "repo_id": "keras-cv", "token_count": 4048 }
0
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Title: Train a Semantic Segmentation Model on Pascal VOC 2012 using KerasCV Author: [tanzhenyu](https://github.com/tanzhenyu) Date created: 2022/10/25 Last modified: 2022/10/25 Description: Use KerasCV to train a DeepLabV3 on Pascal VOC 2012. """ import sys import tensorflow as tf from absl import flags from absl import logging from tensorflow import keras from keras_cv import models from keras_cv.datasets.pascal_voc.segmentation import load flags.DEFINE_string( "weights_path", "weights_{epoch:02d}.h5", "Directory which will be used to store weight checkpoints.", ) flags.DEFINE_boolean( "mixed_precision", True, "whether to use FP16 mixed precision for training.", ) flags.DEFINE_string( "tensorboard_path", "logs", "Directory which will be used to store tensorboard logs.", ) flags.DEFINE_integer( "epochs", 100, "Number of epochs to run for.", ) flags.DEFINE_string( "model_name", None, "The model name to be trained", ) flags.DEFINE_string( "model_kwargs", "{}", "Keyword argument dictionary to pass to the constructor of the model being" " trained", ) FLAGS = flags.FLAGS FLAGS(sys.argv) if FLAGS.mixed_precision: logging.info("mixed precision training enabled") keras.mixed_precision.set_global_policy("mixed_float16") # Try to detect an available TPU. If none is present, defaults to # MirroredStrategy try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() strategy = tf.distribute.TPUStrategy(tpu) except ValueError: # MirroredStrategy is best for a single machine with one or multiple GPUs strategy = tf.distribute.MirroredStrategy() print("Number of accelerators: ", strategy.num_replicas_in_sync) # parameters from FasterRCNN [paper](https://arxiv.org/pdf/1506.01497.pdf) local_batch = 4 global_batch = local_batch * strategy.num_replicas_in_sync base_lr = 0.007 * global_batch / 16 # TODO(tanzhenyu): add a diff dataset. # all_ds = load(split="sbd_train", data_dir=None) # all_ds = all_ds.concatenate(load(split="sbd_eval", data_dir=None)) # train_ds = all_ds.take(10000) # eval_ds = all_ds.skip(10000).concatenate(load(split="diff", data_dir=None)) train_ds = load(split="sbd_train", data_dir=None) eval_ds = load(split="sbd_eval", data_dir=None) resize_layer = keras.layers.Resizing(512, 512, interpolation="nearest") image_size = [512, 512, 3] # TODO(tanzhenyu): move to KPL. 
def flip_fn(image, cls_seg): if tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32) > 0.5: image = tf.image.flip_left_right(image) cls_seg = tf.reverse(cls_seg, axis=[1]) return image, cls_seg def proc_train_fn(examples): image = examples.pop("image") image = tf.cast(image, tf.float32) image = resize_layer(image) cls_seg = examples.pop("class_segmentation") cls_seg = tf.cast(cls_seg, tf.float32) cls_seg = resize_layer(cls_seg) image, cls_seg = flip_fn(image, cls_seg) cls_seg = tf.cast(cls_seg, tf.uint8) sample_weight = tf.equal(cls_seg, 255) zeros = tf.zeros_like(cls_seg) cls_seg = tf.where(sample_weight, zeros, cls_seg) return image, cls_seg def proc_eval_fn(examples): image = examples.pop("image") image = tf.cast(image, tf.float32) image = resize_layer(image) cls_seg = examples.pop("class_segmentation") cls_seg = tf.cast(cls_seg, tf.float32) cls_seg = resize_layer(cls_seg) cls_seg = tf.cast(cls_seg, tf.uint8) sample_weight = tf.equal(cls_seg, 255) zeros = tf.zeros_like(cls_seg) cls_seg = tf.where(sample_weight, zeros, cls_seg) return image, cls_seg train_ds = train_ds.map(proc_train_fn, num_parallel_calls=tf.data.AUTOTUNE) train_ds = train_ds.batch(global_batch, drop_remainder=True) eval_ds = eval_ds.map(proc_eval_fn, num_parallel_calls=tf.data.AUTOTUNE) eval_ds = eval_ds.batch(global_batch, drop_remainder=True) train_ds = train_ds.shuffle(8) train_ds = train_ds.prefetch(2) with strategy.scope(): lr_decay = keras.optimizers.schedules.PiecewiseConstantDecay( boundaries=[30000 * 16 / global_batch], values=[base_lr, 0.1 * base_lr], ) backbone = models.ResNet50V2( include_rescaling=True, # This argument gives a 2% mIoU increase stackwise_dilations=[1, 1, 1, 2], input_shape=(512, 512, 3), include_top=False, weights="imagenet", ) model = models.__dict__[FLAGS.model_name] model = model(num_classes=21, backbone=backbone, **eval(FLAGS.model_kwargs)) optimizer = keras.optimizers.SGD( learning_rate=lr_decay, momentum=0.9, clipnorm=10.0 ) # ignore 255 as the class for semantic boundary. loss_fn = keras.losses.SparseCategoricalCrossentropy(ignore_class=255) metrics = [ keras.metrics.SparseCategoricalCrossentropy(ignore_class=255), keras.metrics.MeanIoU(num_classes=21, sparse_y_pred=False), keras.metrics.SparseCategoricalAccuracy(), ] callbacks = [ keras.callbacks.ModelCheckpoint( filepath=FLAGS.weights_path, monitor="val_mean_io_u", save_best_only=True, save_weights_only=True, ), keras.callbacks.TensorBoard( log_dir=FLAGS.tensorboard_path, write_steps_per_second=True ), ] model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics) model.fit( train_ds, epochs=FLAGS.epochs, validation_data=eval_ds, callbacks=callbacks )
keras-cv/examples/training/semantic_segmentation/pascal_voc/basic_training.py/0
{ "file_path": "keras-cv/examples/training/semantic_segmentation/pascal_voc/basic_training.py", "repo_id": "keras-cv", "token_count": 2420 }
1
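The `proc_train_fn`/`proc_eval_fn` preprocessing above maps the Pascal VOC boundary label 255 to 0 before batching. A tiny standalone illustration of that masking step, with made-up label values, is shown below.

```python
import tensorflow as tf

cls_seg = tf.constant([[0, 12, 255], [255, 7, 20]], dtype=tf.uint8)
boundary = tf.equal(cls_seg, 255)  # pixels marked as semantic boundary
cls_seg = tf.where(boundary, tf.zeros_like(cls_seg), cls_seg)
print(cls_seg.numpy())  # [[ 0 12  0] [ 0  7 20]]
```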
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Converter functions for working with bounding box formats.""" import tensorflow as tf from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.backend.scope import tf_data # Internal exception to propagate the fact images was not passed to a converter # that needs it. class RequiresImagesException(Exception): pass ALL_AXES = 4 def _encode_box_to_deltas( anchors, boxes, anchor_format: str, box_format: str, variance=None, image_shape=None, ): """Converts bounding_boxes from `center_yxhw` to delta format.""" if variance is not None: variance = ops.convert_to_tensor(variance, "float32") var_len = variance.shape[-1] if var_len != 4: raise ValueError(f"`variance` must be length 4, got {variance}") encoded_anchors = convert_format( anchors, source=anchor_format, target="center_yxhw", image_shape=image_shape, ) boxes = convert_format( boxes, source=box_format, target="center_yxhw", image_shape=image_shape ) anchor_dimensions = ops.maximum( encoded_anchors[..., 2:], keras.backend.epsilon() ) box_dimensions = ops.maximum(boxes[..., 2:], keras.backend.epsilon()) # anchors be unbatched, boxes can either be batched or unbatched. boxes_delta = ops.concatenate( [ (boxes[..., :2] - encoded_anchors[..., :2]) / anchor_dimensions, ops.log(box_dimensions / anchor_dimensions), ], axis=-1, ) if variance is not None: boxes_delta /= variance return boxes_delta def _decode_deltas_to_boxes( anchors, boxes_delta, anchor_format: str, box_format: str, variance=None, image_shape=None, ): """Converts bounding_boxes from delta format to `center_yxhw`.""" if variance is not None: variance = ops.convert_to_tensor(variance, "float32") var_len = variance.shape[-1] if var_len != 4: raise ValueError(f"`variance` must be length 4, got {variance}") def decode_single_level(anchor, box_delta): encoded_anchor = convert_format( anchor, source=anchor_format, target="center_yxhw", image_shape=image_shape, ) if variance is not None: box_delta = box_delta * variance # anchors be unbatched, boxes can either be batched or unbatched. 
box = ops.concatenate( [ box_delta[..., :2] * encoded_anchor[..., 2:] + encoded_anchor[..., :2], ops.exp(box_delta[..., 2:]) * encoded_anchor[..., 2:], ], axis=-1, ) box = convert_format( box, source="center_yxhw", target=box_format, image_shape=image_shape, ) return box if isinstance(anchors, dict) and isinstance(boxes_delta, dict): boxes = {} for lvl, anchor in anchors.items(): boxes[lvl] = decode_single_level(anchor, boxes_delta[lvl]) return boxes else: return decode_single_level(anchors, boxes_delta) def _center_yxhw_to_xyxy(boxes, images=None, image_shape=None): y, x, height, width = ops.split(boxes, ALL_AXES, axis=-1) return ops.concatenate( [x - width / 2.0, y - height / 2.0, x + width / 2.0, y + height / 2.0], axis=-1, ) def _center_xywh_to_xyxy(boxes, images=None, image_shape=None): x, y, width, height = ops.split(boxes, ALL_AXES, axis=-1) return ops.concatenate( [x - width / 2.0, y - height / 2.0, x + width / 2.0, y + height / 2.0], axis=-1, ) def _xywh_to_xyxy(boxes, images=None, image_shape=None): x, y, width, height = ops.split(boxes, ALL_AXES, axis=-1) return ops.concatenate([x, y, x + width, y + height], axis=-1) def _xyxy_to_center_yxhw(boxes, images=None, image_shape=None): left, top, right, bottom = ops.split(boxes, ALL_AXES, axis=-1) return ops.concatenate( [ (top + bottom) / 2.0, (left + right) / 2.0, bottom - top, right - left, ], axis=-1, ) def _rel_xywh_to_xyxy(boxes, images=None, image_shape=None): image_height, image_width = _image_shape(images, image_shape, boxes) x, y, width, height = ops.split(boxes, ALL_AXES, axis=-1) return ops.concatenate( [ image_width * x, image_height * y, image_width * (x + width), image_height * (y + height), ], axis=-1, ) def _xyxy_no_op(boxes, images=None, image_shape=None): return boxes def _xyxy_to_xywh(boxes, images=None, image_shape=None): left, top, right, bottom = ops.split(boxes, ALL_AXES, axis=-1) return ops.concatenate( [left, top, right - left, bottom - top], axis=-1, ) def _xyxy_to_rel_xywh(boxes, images=None, image_shape=None): image_height, image_width = _image_shape(images, image_shape, boxes) left, top, right, bottom = ops.split(boxes, ALL_AXES, axis=-1) left, right = ( left / image_width, right / image_width, ) top, bottom = top / image_height, bottom / image_height return ops.concatenate( [left, top, right - left, bottom - top], axis=-1, ) def _xyxy_to_center_xywh(boxes, images=None, image_shape=None): left, top, right, bottom = ops.split(boxes, ALL_AXES, axis=-1) return ops.concatenate( [ (left + right) / 2.0, (top + bottom) / 2.0, right - left, bottom - top, ], axis=-1, ) def _rel_xyxy_to_xyxy(boxes, images=None, image_shape=None): image_height, image_width = _image_shape(images, image_shape, boxes) left, top, right, bottom = ops.split( boxes, ALL_AXES, axis=-1, ) left, right = left * image_width, right * image_width top, bottom = top * image_height, bottom * image_height return ops.concatenate( [left, top, right, bottom], axis=-1, ) def _xyxy_to_rel_xyxy(boxes, images=None, image_shape=None): image_height, image_width = _image_shape(images, image_shape, boxes) left, top, right, bottom = ops.split( boxes, ALL_AXES, axis=-1, ) left, right = left / image_width, right / image_width top, bottom = top / image_height, bottom / image_height return ops.concatenate( [left, top, right, bottom], axis=-1, ) def _yxyx_to_xyxy(boxes, images=None, image_shape=None): y1, x1, y2, x2 = ops.split(boxes, ALL_AXES, axis=-1) return ops.concatenate([x1, y1, x2, y2], axis=-1) def _rel_yxyx_to_xyxy(boxes, images=None, image_shape=None): 
image_height, image_width = _image_shape(images, image_shape, boxes) top, left, bottom, right = ops.split( boxes, ALL_AXES, axis=-1, ) left, right = left * image_width, right * image_width top, bottom = top * image_height, bottom * image_height return ops.concatenate( [left, top, right, bottom], axis=-1, ) def _xyxy_to_yxyx(boxes, images=None, image_shape=None): x1, y1, x2, y2 = ops.split(boxes, ALL_AXES, axis=-1) return ops.concatenate([y1, x1, y2, x2], axis=-1) def _xyxy_to_rel_yxyx(boxes, images=None, image_shape=None): image_height, image_width = _image_shape(images, image_shape, boxes) left, top, right, bottom = ops.split(boxes, ALL_AXES, axis=-1) left, right = left / image_width, right / image_width top, bottom = top / image_height, bottom / image_height return ops.concatenate( [top, left, bottom, right], axis=-1, ) TO_XYXY_CONVERTERS = { "xywh": _xywh_to_xyxy, "center_xywh": _center_xywh_to_xyxy, "center_yxhw": _center_yxhw_to_xyxy, "rel_xywh": _rel_xywh_to_xyxy, "xyxy": _xyxy_no_op, "rel_xyxy": _rel_xyxy_to_xyxy, "yxyx": _yxyx_to_xyxy, "rel_yxyx": _rel_yxyx_to_xyxy, } FROM_XYXY_CONVERTERS = { "xywh": _xyxy_to_xywh, "center_xywh": _xyxy_to_center_xywh, "center_yxhw": _xyxy_to_center_yxhw, "rel_xywh": _xyxy_to_rel_xywh, "xyxy": _xyxy_no_op, "rel_xyxy": _xyxy_to_rel_xyxy, "yxyx": _xyxy_to_yxyx, "rel_yxyx": _xyxy_to_rel_yxyx, } @keras_cv_export("keras_cv.bounding_box.convert_format") @tf_data def convert_format( boxes, source, target, images=None, image_shape=None, dtype="float32" ): f"""Converts bounding_boxes from one format to another. Supported formats are: - `"xyxy"`, also known as `corners` format. In this format the first four axes represent `[left, top, right, bottom]` in that order. - `"rel_xyxy"`. In this format, the axes are the same as `"xyxy"` but the x coordinates are normalized using the image width, and the y axes the image height. All values in `rel_xyxy` are in the range `(0, 1)`. - `"xywh"`. In this format the first four axes represent `[left, top, width, height]`. - `"rel_xywh". In this format the first four axes represent [left, top, width, height], just like `"xywh"`. Unlike `"xywh"`, the values are in the range (0, 1) instead of absolute pixel values. - `"center_xyWH"`. In this format the first two coordinates represent the x and y coordinates of the center of the bounding box, while the last two represent the width and height of the bounding box. - `"center_yxHW"`. In this format the first two coordinates represent the y and x coordinates of the center of the bounding box, while the last two represent the height and width of the bounding box. - `"yxyx"`. In this format the first four axes represent [top, left, bottom, right] in that order. - `"rel_yxyx"`. In this format, the axes are the same as `"yxyx"` but the x coordinates are normalized using the image width, and the y axes the image height. All values in `rel_yxyx` are in the range (0, 1). Formats are case insensitive. It is recommended that you capitalize width and height to maximize the visual difference between `"xyWH"` and `"xyxy"`. Relative formats, abbreviated `rel`, make use of the shapes of the `images` passed. In these formats, the coordinates, widths, and heights are all specified as percentages of the host image. `images` may be a ragged Tensor. Note that using a ragged Tensor for images may cause a substantial performance loss, as each image will need to be processed separately due to the mismatching image shapes. 
Usage: ```python boxes = load_coco_dataset() boxes_in_xywh = keras_cv.bounding_box.convert_format( boxes, source='xyxy', target='xyWH' ) ``` Args: boxes: tensor representing bounding boxes in the format specified in the `source` parameter. `boxes` can optionally have extra dimensions stacked on the final axis to store metadata. boxes should be a 3D tensor, with the shape `[batch_size, num_boxes, 4]`. Alternatively, boxes can be a dictionary with key 'boxes' containing a tensor matching the aforementioned spec. source:One of {" ".join([f'"{f}"' for f in TO_XYXY_CONVERTERS.keys()])}. Used to specify the original format of the `boxes` parameter. target:One of {" ".join([f'"{f}"' for f in TO_XYXY_CONVERTERS.keys()])}. Used to specify the destination format of the `boxes` parameter. images: (Optional) a batch of images aligned with `boxes` on the first axis. Should be at least 3 dimensions, with the first 3 dimensions representing: `[batch_size, height, width]`. Used in some converters to compute relative pixel values of the bounding box dimensions. Required when transforming from a rel format to a non-rel format. dtype: the data type to use when transforming the boxes, defaults to `"float32"`. """ if isinstance(boxes, dict): converted_boxes = boxes.copy() converted_boxes["boxes"] = convert_format( boxes["boxes"], source=source, target=target, images=images, image_shape=image_shape, dtype=dtype, ) return converted_boxes if boxes.shape[-1] is not None and boxes.shape[-1] != 4: raise ValueError( "Expected `boxes` to be a Tensor with a final dimension of " f"`4`. Instead, got `boxes.shape={boxes.shape}`." ) if images is not None and image_shape is not None: raise ValueError( "convert_format() expects either `images` or `image_shape`, but " f"not both. Received images={images} image_shape={image_shape}" ) _validate_image_shape(image_shape) source = source.lower() target = target.lower() if source not in TO_XYXY_CONVERTERS: raise ValueError( "`convert_format()` received an unsupported format for the " "argument `source`. `source` should be one of " f"{TO_XYXY_CONVERTERS.keys()}. Got source={source}" ) if target not in FROM_XYXY_CONVERTERS: raise ValueError( "`convert_format()` received an unsupported format for the " "argument `target`. `target` should be one of " f"{FROM_XYXY_CONVERTERS.keys()}. Got target={target}" ) boxes = ops.cast(boxes, dtype) if source == target: return boxes # rel->rel conversions should not require images if source.startswith("rel") and target.startswith("rel"): source = source.replace("rel_", "", 1) target = target.replace("rel_", "", 1) boxes, images, squeeze = _format_inputs(boxes, images) to_xyxy_fn = TO_XYXY_CONVERTERS[source] from_xyxy_fn = FROM_XYXY_CONVERTERS[target] try: in_xyxy = to_xyxy_fn(boxes, images=images, image_shape=image_shape) result = from_xyxy_fn(in_xyxy, images=images, image_shape=image_shape) except RequiresImagesException: raise ValueError( "convert_format() must receive `images` or `image_shape` when " "transforming between relative and absolute formats." f"convert_format() received source=`{format}`, target=`{format}, " f"but images={images} and image_shape={image_shape}." 
) return _format_outputs(result, squeeze) def _format_inputs(boxes, images): boxes_rank = len(boxes.shape) if boxes_rank > 3: raise ValueError( "Expected len(boxes.shape)=2, or len(boxes.shape)=3, got " f"len(boxes.shape)={boxes_rank}" ) boxes_includes_batch = boxes_rank == 3 # Determine if images needs an expand_dims() call if images is not None: images_rank = len(images.shape) if images_rank > 4: raise ValueError( "Expected len(images.shape)=2, or len(images.shape)=3, got " f"len(images.shape)={images_rank}" ) images_include_batch = images_rank == 4 if boxes_includes_batch != images_include_batch: raise ValueError( "convert_format() expects both boxes and images to be batched, " "or both boxes and images to be unbatched. Received " f"len(boxes.shape)={boxes_rank}, " f"len(images.shape)={images_rank}. Expected either " "len(boxes.shape)=2 AND len(images.shape)=3, or " "len(boxes.shape)=3 AND len(images.shape)=4." ) if not images_include_batch: images = ops.expand_dims(images, axis=0) if not boxes_includes_batch: return ops.expand_dims(boxes, axis=0), images, True return boxes, images, False def _validate_image_shape(image_shape): # Escape early if image_shape is None and skip validation. if image_shape is None: return # tuple/list if isinstance(image_shape, (tuple, list)): if len(image_shape) != 3: raise ValueError( "image_shape should be of length 3, but got " f"image_shape={image_shape}" ) return # tensor if ops.is_tensor(image_shape): if len(image_shape.shape) > 1: raise ValueError( "image_shape.shape should be (3), but got " f"image_shape.shape={image_shape.shape}" ) if image_shape.shape[0] != 3: raise ValueError( "image_shape.shape should be (3), but got " f"image_shape.shape={image_shape.shape}" ) return # Warn about failure cases raise ValueError( "Expected image_shape to be either a tuple, list, Tensor. " f"Received image_shape={image_shape}" ) def _format_outputs(boxes, squeeze): if squeeze: return ops.squeeze(boxes, axis=0) return boxes def _image_shape(images, image_shape, boxes): if images is None and image_shape is None: raise RequiresImagesException() if image_shape is None: if not isinstance(images, tf.RaggedTensor): image_shape = ops.shape(images) height, width = image_shape[1], image_shape[2] else: height = ops.reshape(images.row_lengths(), (-1, 1)) width = ops.reshape(ops.max(images.row_lengths(axis=2), 1), (-1, 1)) height = ops.expand_dims(height, axis=-1) width = ops.expand_dims(width, axis=-1) else: height, width = image_shape[0], image_shape[1] return ops.cast(height, boxes.dtype), ops.cast(width, boxes.dtype)
keras-cv/keras_cv/bounding_box/converters.py/0
{ "file_path": "keras-cv/keras_cv/bounding_box/converters.py", "repo_id": "keras-cv", "token_count": 7974 }
2
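A short usage sketch for `convert_format` defined above; the box coordinates are arbitrary, and `image_shape` (or `images`) is only required when a `rel_*` format is involved.

```python
import numpy as np

import keras_cv

boxes = np.array([[[10.0, 20.0, 110.0, 220.0]]])  # one batched box in "xyxy"

xywh = keras_cv.bounding_box.convert_format(boxes, source="xyxy", target="xywh")
rel_xyxy = keras_cv.bounding_box.convert_format(
    boxes, source="xyxy", target="rel_xyxy", image_shape=(480, 640, 3)
)
print(xywh)      # [[[ 10.  20. 100. 200.]]]
print(rel_xyxy)  # x coordinates divided by 640, y coordinates by 480
```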
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from keras_cv import bounding_box from keras_cv.tests.test_case import TestCase class ValidateTest(TestCase): def test_raises_nondict(self): with self.assertRaisesRegex( ValueError, "Expected `bounding_boxes` to be a dictionary, got " ): bounding_box.validate_format(np.ones((4, 3, 6))) def test_mismatch_dimensions(self): with self.assertRaisesRegex( ValueError, "Expected `boxes` and `classes` to have matching dimensions", ): bounding_box.validate_format( {"boxes": np.ones((4, 3, 6)), "classes": np.ones((4, 6))} ) def test_bad_keys(self): with self.assertRaisesRegex(ValueError, "containing keys"): bounding_box.validate_format( { "box": [ 1, 2, 3, ], "class": [1234], } )
keras-cv/keras_cv/bounding_box/validate_format_test.py/0
{ "file_path": "keras-cv/keras_cv/bounding_box/validate_format_test.py", "repo_id": "keras-cv", "token_count": 716 }
3
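For contrast with the failure cases tested above, this is a minimal example (arbitrary values) of a dictionary that satisfies `validate_format`: the required "boxes" and "classes" keys with matching batch and box dimensions.

```python
import numpy as np

from keras_cv import bounding_box

bounding_boxes = {
    "boxes": np.array([[[10.0, 10.0, 50.0, 50.0], [0.0, 0.0, 20.0, 20.0]]]),  # shape (1, 2, 4)
    "classes": np.array([[0.0, 3.0]]),                                        # shape (1, 2)
}
bounding_box.validate_format(bounding_boxes)  # raises ValueError if malformed
```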
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ formats.py contains axis information for each supported format. """ from keras_cv.api_export import keras_cv_export @keras_cv_export("keras_cv.keypoint.XY") class XY: """XY contains axis indices for the XY format. All values in the XY format should be absolute pixel values. The XY format consists of the following required indices: - X: the width position - Y: the height position and the following optional indices, used in some KerasCV components: - CLASS: class of the keypoints - CONFIDENCE: confidence of the keypoints """ X = 0 Y = 1 CLASS = 2 CONFIDENCE = 3 @keras_cv_export("keras_cv.keypoint.REL_XY") class REL_XY: """REL_XY contains axis indices for the REL_XY format. REL_XY is like XY, but each value is relative to the width and height of the origin image. Values are percentages of the origin images' width and height respectively. The REL_XY format consists of the following required indices: - X: the width position - Y: the height position and the following optional indices, used in some KerasCV components: - CLASS: class of the keypoints - CONFIDENCE: confidence of the keypoints """ X = 0 Y = 1 CLASS = 2 CONFIDENCE = 3
keras-cv/keras_cv/keypoint/formats.py/0
{ "file_path": "keras-cv/keras_cv/keypoint/formats.py", "repo_id": "keras-cv", "token_count": 566 }
4
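A tiny illustration of indexing keypoints with the axis constants defined above; the keypoint values are made up.

```python
import numpy as np

import keras_cv

keypoints = np.array([[120.0, 45.0], [64.0, 300.0]])  # absolute-pixel XY pairs
xs = keypoints[:, keras_cv.keypoint.XY.X]
ys = keypoints[:, keras_cv.keypoint.XY.Y]
print(xs, ys)  # [120.  64.] [ 45. 300.]
```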
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops @keras_cv_export("keras_cv.layers.BoxMatcher") class BoxMatcher(keras.layers.Layer): """Box matching logic based on argmax of highest value (e.g., IOU). This class computes matches from a similarity matrix. Each row will be matched to at least one column, the matched result can either be positive / negative, or simply ignored depending on the setting. The settings include `thresholds` and `match_values`, for example if: 1) thresholds=[negative_threshold, positive_threshold], and match_values=[negative_value=0, ignore_value=-1, positive_value=1]: the rows will be assigned to positive_value if its argmax result >= positive_threshold; the rows will be assigned to negative_value if its argmax result < negative_threshold, and the rows will be assigned to ignore_value if its argmax result is between [negative_threshold, positive_threshold). 2) thresholds=[negative_threshold, positive_threshold], and match_values=[ignore_value=-1, negative_value=0, positive_value=1]: the rows will be assigned to positive_value if its argmax result >= positive_threshold; the rows will be assigned to ignore_value if its argmax result < negative_threshold, and the rows will be assigned to negative_value if its argmax result is between [negative_threshold, positive_threshold). This is different from case 1) by swapping first two values. 3) thresholds=[positive_threshold], and match_values=[negative_values, positive_value]: the rows will be assigned to positive value if its argmax result >= positive_threshold; the rows will be assigned to negative_value if its argmax result < negative_threshold. Args: thresholds: A sorted list of floats to classify the matches into different results (e.g. positive or negative or ignored match). The list will be prepended with -Inf and and appended with +Inf. match_values: A list of integers representing matched results (e.g. positive or negative or ignored match). len(`match_values`) must equal to len(`thresholds`) + 1. force_match_for_each_col: each row will be argmax matched to at least one column. This means some columns will be matched to multiple rows while some columns will not be matched to any rows. Filtering by `thresholds` will make less columns match to positive result. Setting this to True guarantees that each column will be matched to positive result to at least one row. Raises: ValueError: if `thresholds` not sorted or len(`match_values`) != len(`thresholds`) + 1 Usage: ```python box_matcher = keras_cv.layers.BoxMatcher([0.3, 0.7], [-1, 0, 1]) iou_metric = keras_cv.bounding_box.compute_iou(anchors, boxes) matched_columns, matched_match_values = box_matcher(iou_metric) cls_mask = ops.less_equal(matched_match_values, 0) ``` TODO(tanzhenyu): document when to use which mode. 
""" def __init__( self, thresholds: List[float], match_values: List[int], force_match_for_each_col: bool = False, **kwargs, ): super().__init__(**kwargs) if sorted(thresholds) != thresholds: raise ValueError(f"`threshold` must be sorted, got {thresholds}") self.match_values = match_values if len(match_values) != len(thresholds) + 1: raise ValueError( f"len(`match_values`) must be len(`thresholds`) + 1, got " f"match_values {match_values}, thresholds {thresholds}" ) thresholds.insert(0, -float("inf")) thresholds.append(float("inf")) self.thresholds = thresholds self.force_match_for_each_col = force_match_for_each_col self.built = True def call(self, similarity_matrix): """Matches each row to a column based on argmax TODO(tanzhenyu): consider swapping rows and cols. Args: similarity_matrix: A float Tensor of shape [num_rows, num_cols] or [batch_size, num_rows, num_cols] representing any similarity metric. Returns: matched_columns: An integer tensor of shape [num_rows] or [batch_size, num_rows] storing the index of the matched colum for each row. matched_values: An integer tensor of shape [num_rows] or [batch_size, num_rows] storing the match result (positive match, negative match, ignored match). """ squeeze_result = False if len(similarity_matrix.shape) == 2: squeeze_result = True similarity_matrix = ops.expand_dims(similarity_matrix, axis=0) static_shape = list(similarity_matrix.shape) num_rows = static_shape[1] or ops.shape(similarity_matrix)[1] batch_size = static_shape[0] or ops.shape(similarity_matrix)[0] def _match_when_cols_are_empty(): """Performs matching when the rows of similarity matrix are empty. When the rows are empty, all detections are false positives. So we return a tensor of -1's to indicate that the rows do not match to any columns. Returns: matched_columns: An integer tensor of shape [batch_size, num_rows] storing the index of the matched column for each row. matched_values: An integer tensor of shape [batch_size, num_rows] storing the match type indicator (e.g. positive or negative or ignored match). """ with ops.name_scope("empty_boxes"): matched_columns = ops.zeros( [batch_size, num_rows], dtype="int32" ) matched_values = -ops.ones( [batch_size, num_rows], dtype="int32" ) return matched_columns, matched_values def _match_when_cols_are_non_empty(): """Performs matching when the rows of similarity matrix are non-empty. Returns: matched_columns: An integer tensor of shape [batch_size, num_rows] storing the index of the matched column for each row. matched_values: An integer tensor of shape [batch_size, num_rows] storing the match type indicator (e.g. positive or negative or ignored match). """ with ops.name_scope("non_empty_boxes"): # Jax traces this function even when running eagerly and the # columns are non-empty. Therefore, we need to handle the case # where the similarity matrix is empty. We do this by padding # some -1s to the end. -1s are guaranteed to not affect argmax # matching because all values in a similarity matrix are [0,1] # and the indexing won't change because these are added at the # end. 
padded_similarity_matrix = ops.concatenate( [similarity_matrix, -ops.ones((batch_size, num_rows, 1))], axis=-1, ) matched_columns = ops.argmax( padded_similarity_matrix, axis=-1, ) # Get logical indices of ignored and unmatched columns as int32 matched_vals = ops.max(padded_similarity_matrix, axis=-1) matched_values = ops.zeros([batch_size, num_rows], "int32") match_dtype = matched_vals.dtype for ind, low, high in zip( self.match_values, self.thresholds[:-1], self.thresholds[1:] ): low_threshold = ops.cast(low, match_dtype) high_threshold = ops.cast(high, match_dtype) mask = ops.logical_and( ops.greater_equal(matched_vals, low_threshold), ops.less(matched_vals, high_threshold), ) matched_values = self._set_values_using_indicator( matched_values, mask, ind ) if self.force_match_for_each_col: # [batch_size, num_cols], for each column (groundtruth_box), # find the best matching row (anchor). matching_rows = ops.argmax( padded_similarity_matrix, axis=1, ) # [batch_size, num_cols, num_rows], a transposed 0-1 mapping # matrix M, where M[j, i] = 1 means column j is matched to # row i. column_to_row_match_mapping = ops.one_hot( matching_rows, num_rows ) # [batch_size, num_rows], for each row (anchor), find the # matched column (groundtruth_box). force_matched_columns = ops.argmax( column_to_row_match_mapping, axis=1, ) # [batch_size, num_rows] force_matched_column_mask = ops.cast( ops.max(column_to_row_match_mapping, axis=1), "bool", ) # [batch_size, num_rows] matched_columns = ops.where( force_matched_column_mask, force_matched_columns, matched_columns, ) matched_values = ops.where( force_matched_column_mask, self.match_values[-1] * ops.ones([batch_size, num_rows], dtype="int32"), matched_values, ) return ops.cast(matched_columns, "int32"), matched_values num_boxes = ( similarity_matrix.shape[-1] or ops.shape(similarity_matrix)[-1] ) matched_columns, matched_values = ops.cond( pred=ops.greater(num_boxes, 0), true_fn=_match_when_cols_are_non_empty, false_fn=_match_when_cols_are_empty, ) if squeeze_result: matched_columns = ops.squeeze(matched_columns, axis=0) matched_values = ops.squeeze(matched_values, axis=0) return matched_columns, matched_values def _set_values_using_indicator(self, x, indicator, val): """Set the indicated fields of x to val. Args: x: tensor. indicator: boolean with same shape as x. val: scalar with value to set. Returns: modified tensor. """ indicator = ops.cast(indicator, x.dtype) return ops.add(ops.multiply(x, 1 - indicator), val * indicator) def get_config(self): config = { "thresholds": self.thresholds[1:-1], "match_values": self.match_values, "force_match_for_each_col": self.force_match_for_each_col, } return config
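

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how `thresholds` / `match_values` behave on a
# hand-made similarity matrix. The values below are assumptions chosen only
# for illustration.
if __name__ == "__main__":
    # 3 rows (e.g. anchors) x 2 columns (e.g. ground truth boxes).
    similarity = ops.convert_to_tensor(
        [
            [0.9, 0.1],  # max 0.9 >= 0.7 -> positive (1)
            [0.4, 0.5],  # max 0.5 in [0.3, 0.7) -> ignored (-1)
            [0.1, 0.2],  # max 0.2 < 0.3 -> negative (0)
        ]
    )
    matcher = BoxMatcher(thresholds=[0.3, 0.7], match_values=[0, -1, 1])
    matched_columns, matched_values = matcher(similarity)
    print(matched_columns)  # expected: [0, 1, 1]
    print(matched_values)  # expected: [1, -1, 0]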
keras-cv/keras_cv/layers/object_detection/box_matcher.py/0
{ "file_path": "keras-cv/keras_cv/layers/object_detection/box_matcher.py", "repo_id": "keras-cv", "token_count": 5538 }
5
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import tensorflow as tf from keras_cv.layers.object_detection.sampling import balanced_sample from keras_cv.tests.test_case import TestCase @pytest.mark.tf_keras_only class BalancedSamplingTest(TestCase): def test_balanced_sampling(self): positive_matches = tf.constant( [ True, False, False, False, False, False, False, False, False, False, ] ) negative_matches = tf.constant( [False, True, True, True, True, True, True, True, True, True] ) num_samples = 5 positive_fraction = 0.2 res = balanced_sample( positive_matches, negative_matches, num_samples, positive_fraction ) # The 1st element must be selected, given it's the only one. self.assertAllClose(res[0], 1) def test_balanced_batched_sampling(self): positive_matches = tf.constant( [ [ True, False, False, False, False, False, False, False, False, False, ], [ False, False, False, False, False, False, True, False, False, False, ], ] ) negative_matches = tf.constant( [ [False, True, True, True, True, True, True, True, True, True], [True, True, True, True, True, True, False, True, True, True], ] ) num_samples = 5 positive_fraction = 0.2 res = balanced_sample( positive_matches, negative_matches, num_samples, positive_fraction ) # the 1st element from the 1st batch must be selected, given it's the # only one self.assertAllClose(res[0][0], 1) # the 7th element from the 2nd batch must be selected, given it's the # only one self.assertAllClose(res[1][6], 1) def test_balanced_sampling_over_positive_fraction(self): positive_matches = tf.constant( [ True, False, False, False, False, False, False, False, False, False, ] ) negative_matches = tf.constant( [False, True, True, True, True, True, True, True, True, True] ) num_samples = 5 positive_fraction = 0.4 res = balanced_sample( positive_matches, negative_matches, num_samples, positive_fraction ) # only 1 positive sample exists, thus it is chosen self.assertAllClose(res[0], 1) def test_balanced_sampling_under_positive_fraction(self): positive_matches = tf.constant( [ True, False, False, False, False, False, False, False, False, False, ] ) negative_matches = tf.constant( [False, True, True, True, True, True, True, True, True, True] ) num_samples = 5 positive_fraction = 0.1 res = balanced_sample( positive_matches, negative_matches, num_samples, positive_fraction ) # no positive is chosen self.assertAllClose(res[0], 0) self.assertAllClose(tf.reduce_sum(res), 5) def test_balanced_sampling_over_num_samples(self): positive_matches = tf.constant( [ True, False, False, False, False, False, False, False, False, False, ] ) negative_matches = tf.constant( [False, True, True, True, True, True, True, True, True, True] ) # users want to get 20 samples, but only 10 are available num_samples = 20 positive_fraction = 0.1 with self.assertRaisesRegex(ValueError, "has less element"): _ = balanced_sample( positive_matches, negative_matches, num_samples, positive_fraction, ) def test_balanced_sampling_no_positive(self): positive_matches = 
tf.constant( [ False, False, False, False, False, False, False, False, False, False, ] ) # the rest are neither positive nor negative, but ignored matches negative_matches = tf.constant( [False, False, True, False, False, True, False, False, True, False] ) num_samples = 5 positive_fraction = 0.5 res = balanced_sample( positive_matches, negative_matches, num_samples, positive_fraction ) # given only 3 negative and 0 positive, select all of them self.assertAllClose(res, [0, 0, 1, 0, 0, 1, 0, 0, 1, 0]) def test_balanced_sampling_no_negative(self): positive_matches = tf.constant( [True, True, False, False, False, False, False, False, False, False] ) # 2-9 indices are neither positive nor negative, they're ignored matches negative_matches = tf.constant([False] * 10) num_samples = 5 positive_fraction = 0.5 res = balanced_sample( positive_matches, negative_matches, num_samples, positive_fraction ) # given only 2 positive and 0 negative, select all of them. self.assertAllClose(res, [1, 1, 0, 0, 0, 0, 0, 0, 0, 0]) def test_balanced_sampling_many_samples(self): positive_matches = tf.random.uniform( [2, 1000], minval=0, maxval=1, dtype=tf.float32 ) positive_matches = positive_matches > 0.98 negative_matches = tf.logical_not(positive_matches) num_samples = 256 positive_fraction = 0.25 _ = balanced_sample( positive_matches, negative_matches, num_samples, positive_fraction )
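

# --- Illustrative usage sketch (not part of the original test module) ---
# A minimal, hedged example of calling `balanced_sample` directly, mirroring
# the patterns exercised by the tests above. The values are assumptions chosen
# only for illustration.
if __name__ == "__main__":
    positive_matches = tf.constant([True, False, True, False, False, False])
    negative_matches = tf.logical_not(positive_matches)
    # Request 4 samples, at most 50% of which should be positive.
    indicators = balanced_sample(
        positive_matches,
        negative_matches,
        num_samples=4,
        positive_fraction=0.5,
    )
    # `indicators` is a 0/1 vector over the 6 candidates whose sum is 4.
    print(indicators)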
keras-cv/keras_cv/layers/object_detection/sampling_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/object_detection/sampling_test.py", "repo_id": "keras-cv", "token_count": 3917 }
6
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import keras import tensorflow as tf import tree if hasattr(keras, "src"): keras_backend = keras.src.backend else: keras_backend = keras.backend from keras_cv import bounding_box from keras_cv.api_export import keras_cv_export from keras_cv.backend import config from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.backend import scope from keras_cv.utils import preprocessing # In order to support both unbatched and batched inputs, the horizontal # and vertical axis is reverse indexed H_AXIS = -3 W_AXIS = -2 IMAGES = "images" LABELS = "labels" TARGETS = "targets" BOUNDING_BOXES = "bounding_boxes" KEYPOINTS = "keypoints" SEGMENTATION_MASKS = "segmentation_masks" IS_DICT = "is_dict" USE_TARGETS = "use_targets" @keras_cv_export("keras_cv.layers.BaseImageAugmentationLayer") class BaseImageAugmentationLayer(keras.layers.Layer): """Abstract base layer for image augmentation. This layer contains base functionalities for preprocessing layers which augment image related data, e.g. image and in the future, label and bounding boxes. The subclasses could avoid making certain mistakes and reduce code duplications. This layer requires you to implement one method: `augment_image()`, which augments one single image during the training. There are a few additional methods that you can implement for added functionality on the layer: `augment_label()`, which handles label augmentation if the layer supports that. `augment_bounding_boxes()`, which handles the bounding box augmentation, if the layer supports that. `get_random_transformation()`, which should produce a random transformation setting. The transformation object, which could be of any type, will be passed to `augment_image`, `augment_label` and `augment_bounding_boxes`, to coordinate the randomness behaviour, e.g., in the RandomFlip layer, the image and bounding_boxes should be changed in the same way. The `call()` method supports two formats of inputs: 1. A single image tensor with shape (height, width, channels) or (batch_size, height, width, channels) 2. A dict of tensors with any of the following keys (note that `"images"` must be present): * `"images"` - Image Tensor with shape (height, width, channels) or (batch_size, height, width, channels) * `"labels"` - One-hot encoded classification labels Tensor with shape (num_classes) or (batch_size, num_classes) * `"bounding_boxes"` - A dictionary with keys: * `"boxes"` - Tensor with shape (num_boxes, 4) or (batch_size, num_boxes, 4) * `"classes"` - Tensor of class labels for boxes with shape (num_boxes, num_classes) or (batch_size, num_boxes, num_classes). Any other keys included in this dictionary will be ignored and unmodified by an augmentation layer. The output of the `call()` will be the same structure as the inputs. The `call()` will unpack the inputs, forward to the correct function, and pack the output back to the same structure as the inputs. 
    By default, the `call()` method leverages the `tf.vectorized_map()`
    function. Auto-vectorization can be disabled by setting
    `self.auto_vectorize = False` in your `__init__()` method. When disabled,
    `call()` instead relies on `tf.map_fn()`. For example:

    ```python
    class SubclassLayer(keras_cv.BaseImageAugmentationLayer):
        def __init__(self):
            super().__init__()
            self.auto_vectorize = False
    ```

    Example:

    ```python
    class RandomContrast(keras_cv.BaseImageAugmentationLayer):
        def __init__(self, factor=(0.5, 1.5), **kwargs):
            super().__init__(**kwargs)
            self._factor = factor

        def augment_image(self, image, transformation):
            random_factor = tf.random.uniform(
                [], self._factor[0], self._factor[1]
            )
            mean = tf.math.reduce_mean(image, axis=-1, keepdims=True)
            return (image - mean) * random_factor + mean
    ```

    Note that since randomness is also a common functionality, this layer also
    includes a random number generator, which can be used to produce the
    random numbers. The random number generator is stored in the
    `self._random_generator` attribute.
    """

    def __init__(self, seed=None, **kwargs):
        if seed is not None:
            self._random_generator = tf.random.Generator.from_seed(seed=seed)
        else:
            self._random_generator = tf.random.get_global_generator()
        super().__init__(**kwargs)
        self._allow_non_tensor_positional_args = True
        self.built = True
        self._convert_input_args = False

    @property
    def force_output_ragged_images(self):
        """Control whether to force outputting of ragged images."""
        return getattr(self, "_force_output_ragged_images", False)

    @force_output_ragged_images.setter
    def force_output_ragged_images(self, force_output_ragged_images):
        self._force_output_ragged_images = force_output_ragged_images

    @property
    def force_output_dense_images(self):
        """Control whether to force outputting of dense images."""
        return getattr(self, "_force_output_dense_images", False)

    @force_output_dense_images.setter
    def force_output_dense_images(self, force_output_dense_images):
        self._force_output_dense_images = force_output_dense_images

    @property
    def auto_vectorize(self):
        """Control whether automatic vectorization occurs.

        By default, the `call()` method leverages the `tf.vectorized_map()`
        function. Auto-vectorization can be disabled by setting
        `self.auto_vectorize = False` in your `__init__()` method. When
        disabled, `call()` instead relies on `tf.map_fn()`. For example:

        ```python
        class SubclassLayer(BaseImageAugmentationLayer):
            def __init__(self):
                super().__init__()
                self.auto_vectorize = False
        ```
        """
        return getattr(self, "_auto_vectorize", True)

    @auto_vectorize.setter
    def auto_vectorize(self, auto_vectorize):
        self._auto_vectorize = auto_vectorize

    def compute_image_signature(self, images):
        """Computes the output image signature for the `augment_image()`
        function.

        Must be overridden to return tensors with different shapes than the
        input images. By default, returns either a `tf.RaggedTensorSpec`
        matching the input image spec, or a `tf.TensorSpec` matching the
        input image spec.
""" if self.force_output_dense_images: return tf.TensorSpec(images.shape[1:], self.compute_dtype) if self.force_output_ragged_images or isinstance( images, tf.RaggedTensor ): ragged_spec = tf.RaggedTensorSpec( shape=images.shape[1:], ragged_rank=1, dtype=self.compute_dtype, ) return ragged_spec return tf.TensorSpec(images.shape[1:], self.compute_dtype) # TODO(lukewood): promote to user facing API if needed def _compute_bounding_box_signature(self, bounding_boxes): return { "boxes": tf.RaggedTensorSpec( shape=[None, 4], ragged_rank=1, dtype=self.compute_dtype, ), "classes": tf.RaggedTensorSpec( shape=[None], dtype=self.compute_dtype ), } # TODO(lukewood): promote to user facing API if needed def _compute_keypoints_signature(self, keypoints): if isinstance(keypoints, tf.RaggedTensor): ragged_spec = tf.RaggedTensorSpec( shape=keypoints.shape[1:], ragged_rank=1, dtype=self.compute_dtype, ) return ragged_spec return tf.TensorSpec( shape=keypoints.shape[1:], dtype=self.compute_dtype, ) # TODO(lukewood): promote to user facing API if needed def _compute_target_signature(self, targets): return tf.TensorSpec(targets.shape[1:], self.compute_dtype) def _compute_output_signature(self, inputs): fn_output_signature = { IMAGES: self.compute_image_signature(inputs[IMAGES]) } bounding_boxes = inputs.get(BOUNDING_BOXES, None) if bounding_boxes is not None: fn_output_signature[BOUNDING_BOXES] = ( self._compute_bounding_box_signature(bounding_boxes) ) segmentation_masks = inputs.get(SEGMENTATION_MASKS, None) if segmentation_masks is not None: fn_output_signature[SEGMENTATION_MASKS] = ( self.compute_image_signature(segmentation_masks) ) keypoints = inputs.get(KEYPOINTS, None) if keypoints is not None: fn_output_signature[KEYPOINTS] = self._compute_keypoints_signature( keypoints ) labels = inputs.get(LABELS, None) if labels is not None: fn_output_signature[LABELS] = self._compute_target_signature(labels) return fn_output_signature @staticmethod def _any_ragged(inputs): if isinstance(inputs[IMAGES], tf.RaggedTensor): return True if BOUNDING_BOXES in inputs: return True if KEYPOINTS in inputs: return True return False def _map_fn(self, func, inputs): """Returns either tf.map_fn or tf.vectorized_map based on the provided inputs. Args: inputs: dictionary of inputs provided to map_fn. """ if self._any_ragged(inputs) or self.force_output_ragged_images: return tf.map_fn( func, inputs, fn_output_signature=self._compute_output_signature(inputs), ) if self.auto_vectorize: return tf.vectorized_map(func, inputs) return tf.map_fn(func, inputs) def augment_image(self, image, transformation, **kwargs): """Augment a single image during training. Args: image: 3D image input tensor to the layer. Forwarded from `layer.call()`. transformation: The transformation object produced by `get_random_transformation`. Used to coordinate the randomness between image, label, bounding box, keypoints, and segmentation mask. Returns: output 3D tensor, which will be forward to `layer.call()`. """ raise NotImplementedError() def augment_label(self, label, transformation, **kwargs): """Augment a single label during training. Args: label: 1D label to the layer. Forwarded from `layer.call()`. transformation: The transformation object produced by `get_random_transformation`. Used to coordinate the randomness between image, label, bounding box, keypoints, and segmentation mask. Returns: output 1D tensor, which will be forward to `layer.call()`. 
""" raise NotImplementedError() def augment_target(self, target, transformation, **kwargs): """Augment a single target during training. Args: target: 1D label to the layer. Forwarded from `layer.call()`. transformation: The transformation object produced by `get_random_transformation`. Used to coordinate the randomness between image, label, bounding box, keypoints, and segmentation mask. Returns: output 1D tensor, which will be forward to `layer.call()`. """ return self.augment_label(target, transformation) def augment_bounding_boxes(self, bounding_boxes, transformation, **kwargs): """Augment bounding boxes for one image during training. Args: bounding_boxes: 2D bounding boxes to the layer. Forwarded from `call()`. transformation: The transformation object produced by `get_random_transformation`. Used to coordinate the randomness between image, label, bounding box, keypoints, and segmentation mask. Returns: output 2D tensor, which will be forward to `layer.call()`. """ raise NotImplementedError() def augment_keypoints(self, keypoints, transformation, **kwargs): """Augment keypoints for one image during training. Args: keypoints: 2D keypoints input tensor to the layer. Forwarded from `layer.call()`. transformation: The transformation object produced by `get_random_transformation`. Used to coordinate the randomness between image, label, bounding box, keypoints, and segmentation mask. Returns: output 2D tensor, which will be forward to `layer.call()`. """ raise NotImplementedError() def augment_segmentation_mask( self, segmentation_mask, transformation, **kwargs ): """Augment a single image's segmentation mask during training. Args: segmentation_mask: 3D segmentation mask input tensor to the layer. This should generally have the shape [H, W, 1], or in some cases [H, W, C] for multilabeled data. Forwarded from `layer.call()`. transformation: The transformation object produced by `get_random_transformation`. Used to coordinate the randomness between image, label, bounding box, keypoints, and segmentation mask. Returns: output 3D tensor containing the augmented segmentation mask, which will be forward to `layer.call()`. """ raise NotImplementedError() def get_random_transformation( self, image=None, label=None, bounding_boxes=None, keypoints=None, segmentation_mask=None, ): """Produce random transformation config for one single input. This is used to produce same randomness between image/label/bounding_box. Args: image: 3D image tensor from inputs. label: optional 1D label tensor from inputs. bounding_boxes: optional 2D bounding boxes tensor from inputs. segmentation_mask: optional 3D segmentation mask tensor from inputs. Returns: Any type of object, which will be forwarded to `augment_image`, `augment_label` and `augment_bounding_box` as the `transformation` parameter. 
""" return None def call(self, inputs): # try to convert a given backend native tensor to TensorFlow tensor # before passing it over to TFDataScope is_tf_backend = config.backend() == "tensorflow" is_in_tf_graph = not tf.executing_eagerly() contains_ragged = lambda y: any( tree.map_structure( lambda x: isinstance(x, (tf.RaggedTensor, tf.SparseTensor)), tree.flatten(y), ) ) inputs_contain_ragged = contains_ragged(inputs) if not is_tf_backend and not inputs_contain_ragged: inputs = tree.map_structure( lambda x: tf.convert_to_tensor(x), inputs ) with scope.TFDataScope(): inputs = self._ensure_inputs_are_compute_dtype(inputs) inputs, metadata = self._format_inputs(inputs) images = inputs[IMAGES] if images.shape.rank == 3: outputs = self._format_output(self._augment(inputs), metadata) elif images.shape.rank == 4: outputs = self._format_output( self._batch_augment(inputs), metadata ) else: raise ValueError( "Image augmentation layers are expecting inputs to be " "rank 3 (HWC) or 4D (NHWC) tensors. Got shape: " f"{images.shape}" ) # convert the outputs to backend native tensors if none of them # contain RaggedTensors. Note that if the user passed in Raggeds # but the outputs are dense, we still don't want to convert to # backend native tensors. This is to avoid breaking TF data # pipelines that can't easily be ported to become backend # agnostic. # Skip this step for TF backend or if in `tf.graph` like `tf.data`. if not is_tf_backend and not is_in_tf_graph: if not inputs_contain_ragged and not contains_ragged(outputs): outputs = tree.map_structure( # some layers return None, handle that case when # converting to tensors lambda x: ops.convert_to_tensor(x) if x is not None else x, outputs, ) return outputs def _augment(self, inputs): raw_image = inputs.get(IMAGES, None) image = raw_image label = inputs.get(LABELS, None) bounding_boxes = inputs.get(BOUNDING_BOXES, None) keypoints = inputs.get(KEYPOINTS, None) segmentation_mask = inputs.get(SEGMENTATION_MASKS, None) image_ragged = isinstance(image, tf.RaggedTensor) # At this point, the tensor is not actually ragged as we have mapped # over the batch axis. This call is required to make `tf.shape()` behave # as users subclassing the layer expect. 
if image_ragged: image = image.to_tensor() transformation = self.get_random_transformation( image=image, label=label, bounding_boxes=bounding_boxes, keypoints=keypoints, segmentation_mask=segmentation_mask, ) image = self.augment_image( image, transformation=transformation, bounding_boxes=bounding_boxes, label=label, ) if ( image_ragged and not self.force_output_dense_images ) or self.force_output_ragged_images: image = tf.RaggedTensor.from_tensor(image) result = {IMAGES: image} if label is not None: label = self.augment_target( label, transformation=transformation, bounding_boxes=bounding_boxes, image=image, ) result[LABELS] = label if bounding_boxes is not None: bounding_boxes = bounding_box.to_dense(bounding_boxes) bounding_boxes = self.augment_bounding_boxes( bounding_boxes, transformation=transformation, label=label, image=raw_image, ) bounding_boxes = bounding_box.to_ragged( bounding_boxes, dtype=self.compute_dtype ) result[BOUNDING_BOXES] = bounding_boxes if keypoints is not None: keypoints = self.augment_keypoints( keypoints, transformation=transformation, label=label, bounding_boxes=bounding_boxes, image=image, ) result[KEYPOINTS] = keypoints if segmentation_mask is not None: segmentation_mask = self.augment_segmentation_mask( segmentation_mask, transformation=transformation, ) result[SEGMENTATION_MASKS] = segmentation_mask # preserve any additional inputs unmodified by this layer. for key in inputs.keys() - result.keys(): result[key] = inputs[key] return result def _batch_augment(self, inputs): return self._map_fn(self._augment, inputs) def _format_inputs(self, inputs): metadata = {IS_DICT: True, USE_TARGETS: False} if tf.is_tensor(inputs): # single image input tensor metadata[IS_DICT] = False inputs = {IMAGES: inputs} return inputs, metadata if not isinstance(inputs, dict): raise ValueError( "Expect the inputs to be image tensor or dict. Got " f"inputs={inputs} of type {type(inputs)}" ) if BOUNDING_BOXES in inputs: inputs[BOUNDING_BOXES] = self._format_bounding_boxes( inputs[BOUNDING_BOXES] ) if isinstance(inputs, dict) and TARGETS in inputs: # TODO(scottzhu): Check if it only contains the valid keys inputs[LABELS] = inputs[TARGETS] del inputs[TARGETS] metadata[USE_TARGETS] = True return inputs, metadata return inputs, metadata def _format_bounding_boxes(self, bounding_boxes): # We can't catch the case where this is None, sometimes RaggedTensor # drops this dimension if "classes" not in bounding_boxes: raise ValueError( "Bounding boxes are missing class_id. If you would like to pad " "the bounding boxes with class_id, use: " "`bounding_boxes['classes'] = " "tf.ones_like(bounding_boxes['boxes'])`." ) return bounding_boxes def _format_output(self, output, metadata): if not metadata[IS_DICT]: return output[IMAGES] elif metadata[USE_TARGETS]: output[TARGETS] = output[LABELS] del output[LABELS] return output def _ensure_inputs_are_compute_dtype(self, inputs): if not isinstance(inputs, dict): return preprocessing.ensure_tensor( inputs, self.compute_dtype, ) # Copy the input dict before we mutate it. inputs = dict(inputs) inputs[IMAGES] = preprocessing.ensure_tensor( inputs[IMAGES], self.compute_dtype, ) if BOUNDING_BOXES in inputs: inputs[BOUNDING_BOXES] = bounding_box.ensure_tensor( inputs[BOUNDING_BOXES], dtype=self.compute_dtype ) return inputs
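

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of subclassing `BaseImageAugmentationLayer`.
# The layer name and factor values below are assumptions for illustration
# only; they are not part of the KerasCV API.
if __name__ == "__main__":

    class RandomBrightnessShift(BaseImageAugmentationLayer):
        """Adds a single random scalar shift to every pixel of an image."""

        def __init__(self, max_shift=0.1, **kwargs):
            super().__init__(**kwargs)
            self.max_shift = max_shift

        def get_random_transformation(self, **kwargs):
            # Sampled once per image; if other targets (e.g. masks) were
            # augmented as well, they could reuse this same transformation.
            return tf.random.uniform(
                [], minval=-self.max_shift, maxval=self.max_shift
            )

        def augment_image(self, image, transformation, **kwargs):
            return image + transformation

    layer = RandomBrightnessShift(max_shift=0.2)
    images = tf.random.uniform((2, 8, 8, 3))
    augmented = layer(images)
    print(augmented.shape)  # (2, 8, 8, 3)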
keras-cv/keras_cv/layers/preprocessing/base_image_augmentation_layer.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/base_image_augmentation_layer.py", "repo_id": "keras-cv", "token_count": 9825 }
7
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv import bounding_box from keras_cv.api_export import keras_cv_export from keras_cv.layers.preprocessing.base_image_augmentation_layer import ( BaseImageAugmentationLayer, ) @keras_cv_export("keras_cv.layers.MixUp") class MixUp(BaseImageAugmentationLayer): """MixUp implements the MixUp data augmentation technique. Args: alpha: Float between 0 and 1. Inverse scale parameter for the gamma distribution. This controls the shape of the distribution from which the smoothing values are sampled. Defaults to 0.2, which is a recommended value when training an imagenet1k classification model. seed: Integer. Used to create a random seed. References: - [MixUp paper](https://arxiv.org/abs/1710.09412). - [MixUp for Object Detection paper](https://arxiv.org/pdf/1902.04103). Sample usage: ```python (images, labels), _ = keras.datasets.cifar10.load_data() images, labels = images[:10], labels[:10] # Labels must be floating-point and one-hot encoded labels = tf.cast(tf.one_hot(labels, 10), tf.float32) mixup = keras_cv.layers.preprocessing.MixUp(10) augmented_images, updated_labels = mixup( {'images': images, 'labels': labels} ) # output == {'images': updated_images, 'labels': updated_labels} ``` """ def __init__(self, alpha=0.2, seed=None, **kwargs): super().__init__(seed=seed, **kwargs) self.alpha = alpha self.seed = seed def _sample_from_beta(self, alpha, beta, shape): sample_alpha = tf.random.gamma( shape, alpha=alpha, ) sample_beta = tf.random.gamma( shape, alpha=beta, ) return sample_alpha / (sample_alpha + sample_beta) def _batch_augment(self, inputs): self._validate_inputs(inputs) images = inputs.get("images", None) labels = inputs.get("labels", None) bounding_boxes = inputs.get("bounding_boxes", None) segmentation_masks = inputs.get("segmentation_masks", None) images, lambda_sample, permutation_order = self._mixup(images) if labels is not None: labels = self._update_labels( tf.cast(labels, dtype=self.compute_dtype), lambda_sample, permutation_order, ) inputs["labels"] = labels if bounding_boxes is not None: bounding_boxes = self._update_bounding_boxes( bounding_boxes, permutation_order ) inputs["bounding_boxes"] = bounding_boxes inputs["images"] = images if segmentation_masks is not None: segmentation_masks = self._update_segmentation_masks( segmentation_masks, lambda_sample, permutation_order ) inputs["segmentation_masks"] = segmentation_masks return inputs def _augment(self, inputs): raise ValueError( "MixUp received a single image to `call`. The layer relies on " "combining multiple examples, and as such will not behave as " "expected. Please call the layer with 2 or more samples." 
) def _mixup(self, images): batch_size = tf.shape(images)[0] permutation_order = tf.random.shuffle( tf.range(0, batch_size), seed=self.seed ) lambda_sample = self._sample_from_beta( self.alpha, self.alpha, (batch_size,) ) lambda_sample = tf.cast( tf.reshape(lambda_sample, [-1, 1, 1, 1]), dtype=self.compute_dtype ) mixup_images = tf.cast( tf.gather(images, permutation_order), dtype=self.compute_dtype ) images = lambda_sample * images + (1.0 - lambda_sample) * mixup_images return images, tf.squeeze(lambda_sample), permutation_order def _update_labels(self, labels, lambda_sample, permutation_order): labels_for_mixup = tf.gather(labels, permutation_order) lambda_sample = tf.reshape(lambda_sample, [-1, 1]) labels = ( lambda_sample * labels + (1.0 - lambda_sample) * labels_for_mixup ) return labels def _update_bounding_boxes(self, bounding_boxes, permutation_order): boxes, classes = bounding_boxes["boxes"], bounding_boxes["classes"] boxes_for_mixup = tf.gather(boxes, permutation_order) classes_for_mixup = tf.gather(classes, permutation_order) boxes = tf.concat([boxes, boxes_for_mixup], axis=1) classes = tf.concat([classes, classes_for_mixup], axis=1) return {"boxes": boxes, "classes": classes} def _update_segmentation_masks( self, segmentation_masks, lambda_sample, permutation_order ): lambda_sample = tf.reshape(lambda_sample, [-1, 1, 1, 1]) segmentation_masks_for_mixup = tf.gather( segmentation_masks, permutation_order ) segmentation_masks = ( lambda_sample * segmentation_masks + (1.0 - lambda_sample) * segmentation_masks_for_mixup ) return segmentation_masks def _validate_inputs(self, inputs): images = inputs.get("images", None) labels = inputs.get("labels", None) bounding_boxes = inputs.get("bounding_boxes", None) segmentation_masks = inputs.get("segmentation_masks", None) if images is None or ( labels is None and bounding_boxes is None and segmentation_masks is None ): raise ValueError( "MixUp expects inputs in a dictionary with format " '{"images": images, "labels": labels}. or' '{"images": images, "bounding_boxes": bounding_boxes}. or' '{"images": images, "segmentation_masks": segmentation_masks}. ' f"Got: inputs = {inputs}." ) if labels is not None and not labels.dtype.is_floating: raise ValueError( f"MixUp received labels with type {labels.dtype}. " "Labels must be of type float." ) if bounding_boxes is not None: _ = bounding_box.validate_format(bounding_boxes) if segmentation_masks is not None: if len(segmentation_masks.shape) != 4: raise ValueError( "MixUp expects shape of segmentation_masks as " "[batch, h, w, num_classes]. " f"Got: shape = {segmentation_masks.shape}. " ) def get_config(self): config = { "alpha": self.alpha, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
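

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of applying MixUp to a toy batch. The shapes and
# the choice of `alpha` below are assumptions for illustration only.
if __name__ == "__main__":
    images = tf.random.uniform((4, 32, 32, 3))
    labels = tf.one_hot(tf.constant([0, 1, 2, 3]), 4)  # float one-hot labels
    layer = MixUp(alpha=0.2, seed=42)
    outputs = layer({"images": images, "labels": labels})
    # Each output image is `lambda * x_i + (1 - lambda) * x_j`, where `lambda`
    # is sampled from a Beta(alpha, alpha) distribution and `x_j` is another
    # image from the same batch; labels are blended with the same `lambda`.
    print(outputs["images"].shape, outputs["labels"].shape)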
keras-cv/keras_cv/layers/preprocessing/mix_up.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/mix_up.py", "repo_id": "keras-cv", "token_count": 3236 }
8
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv import core from keras_cv.layers import preprocessing from keras_cv.tests.test_case import TestCase class RandomBrightnessTest(TestCase): def test_preserves_output_shape(self): image_shape = (4, 8, 8, 3) image = tf.random.uniform(shape=image_shape) * 255.0 layer = preprocessing.RandomBrightness(factor=(0.3, 0.8)) output = layer(image) self.assertEqual(image.shape, output.shape) self.assertNotAllClose(image, output) def test_no_adjustment_for_factor_zero(self): image_shape = (4, 8, 8, 3) image = tf.random.uniform(shape=image_shape) * 255.0 layer = preprocessing.RandomBrightness(factor=0) output = layer(image) self.assertAllClose(image, output, atol=1e-5, rtol=1e-5) def test_max_brightness(self): image_shape = (4, 8, 8, 3) image = tf.random.uniform(shape=image_shape) * 255.0 layer = preprocessing.RandomBrightness(factor=(1, 1)) output = layer(image) self.assertAllClose( output, tf.fill((4, 8, 8, 3), 255), atol=1e-5, rtol=1e-5 ) def test_max_brightness_rescaled_value_range(self): image_shape = (4, 8, 8, 3) image = tf.random.uniform(shape=image_shape) layer = preprocessing.RandomBrightness( value_range=(0, 1), factor=(1, 1) ) output = layer(image) self.assertAllClose( output, tf.fill((4, 8, 8, 3), 1), atol=1e-5, rtol=1e-5 ) def test_zero_brightness(self): image_shape = (4, 8, 8, 3) image = tf.random.uniform(shape=image_shape) * 255.0 layer = preprocessing.RandomBrightness(factor=(-1, -1)) output = layer(image) self.assertAllClose( output, tf.fill((4, 8, 8, 3), 0), atol=1e-5, rtol=1e-5 ) def test_with_unit8(self): image_shape = (4, 8, 8, 3) image = tf.cast( tf.random.uniform(shape=image_shape) * 255.0, dtype=tf.uint8 ) layer = preprocessing.RandomBrightness(factor=0) output = layer(image) self.assertAllClose(image, output, atol=1e-5, rtol=1e-5) layer = preprocessing.RandomBrightness(factor=(0.3, 0.8)) output = layer(image) self.assertNotAllClose(image, output) def test_config(self): layer = preprocessing.RandomBrightness( value_range=(0, 1), factor=(0.3, 0.8) ) config = layer.get_config() self.assertTrue(isinstance(config["factor"], core.UniformFactorSampler)) self.assertEqual(config["factor"].get_config()["lower"], 0.3) self.assertEqual(config["factor"].get_config()["upper"], 0.8) self.assertEqual(config["value_range"], (0, 1))
keras-cv/keras_cv/layers/preprocessing/random_brightness_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_brightness_test.py", "repo_id": "keras-cv", "token_count": 1443 }
9
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest import tensorflow as tf from keras_cv.backend import ops from keras_cv.layers import preprocessing from keras_cv.tests.test_case import TestCase class RandomCutoutTest(TestCase): def _run_test(self, height_factor, width_factor): img_shape = (40, 40, 3) xs = tf.stack( [2 * np.ones(img_shape), np.ones(img_shape)], axis=0, ) xs = tf.cast(xs, tf.float32) fill_value = 0.0 layer = preprocessing.RandomCutout( height_factor=height_factor, width_factor=width_factor, fill_mode="constant", fill_value=fill_value, seed=1, ) xs = layer(xs) # Some pixels should be replaced with fill value self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == fill_value)) self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0)) self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == fill_value)) self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0)) def test_return_shapes(self): xs = np.ones((2, 512, 512, 3)) ys_segmentation_masks = np.ones((2, 512, 512, 3)) layer = preprocessing.RandomCutout( height_factor=0.5, width_factor=0.5, seed=1 ) xs = layer(xs) ys_segmentation_masks = layer(ys_segmentation_masks) self.assertEqual(xs.shape, (2, 512, 512, 3)) self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3)) def test_return_shapes_single_element(self): xs = np.ones((512, 512, 3)) ys_segmentation_masks = np.ones((512, 512, 3)) layer = preprocessing.RandomCutout( height_factor=0.5, width_factor=0.5, seed=1 ) xs = layer(xs) ys_segmentation_masks = layer(ys_segmentation_masks) self.assertEqual(xs.shape, (512, 512, 3)) self.assertEqual(ys_segmentation_masks.shape, (512, 512, 3)) def test_random_cutout_single_float(self): self._run_test(0.5, 0.5) def test_random_cutout_tuple_float(self): self._run_test((0.4, 0.9), (0.1, 0.3)) def test_random_cutout_fail_mix_bad_param_values(self): fn = lambda: self._run_test(0.5, (15.0, 30)) self.assertRaises(ValueError, fn) def test_random_cutout_fail_reverse_lower_upper_float(self): fn = lambda: self._run_test(0.5, (0.9, 0.4)) self.assertRaises(ValueError, fn) def test_random_cutout_call_results_one_channel(self): xs = tf.cast( tf.stack( [2 * np.ones((40, 40, 1)), np.ones((40, 40, 1))], axis=0, ), tf.float32, ) patch_value = 0.0 layer = preprocessing.RandomCutout( height_factor=0.5, width_factor=0.5, fill_mode="constant", fill_value=patch_value, seed=1, ) xs = layer(xs) # Some pixels should be replaced with fill value self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == patch_value)) self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0)) self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == patch_value)) self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0)) def test_random_cutout_call_tiny_image(self): img_shape = (4, 4, 3) xs = tf.stack( [2 * np.ones(img_shape), np.ones(img_shape)], axis=0, ) xs = tf.cast(xs, tf.float32) fill_value = 0.0 layer = preprocessing.RandomCutout( height_factor=(0.4, 0.9), width_factor=(0.1, 0.3), fill_mode="constant", fill_value=fill_value, 
seed=1, ) xs = layer(xs) # Some pixels should be replaced with fill value self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == fill_value)) self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0)) self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == fill_value)) self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0)) @pytest.mark.tf_only def test_in_tf_function(self): xs = tf.cast( tf.stack( [2 * np.ones((100, 100, 1)), np.ones((100, 100, 1))], axis=0 ), tf.float32, ) patch_value = 0.0 layer = preprocessing.RandomCutout( height_factor=0.5, width_factor=0.5, fill_mode="constant", fill_value=patch_value, seed=1, ) @tf.function def augment(x): return layer(x) xs = augment(xs) # Some pixels should be replaced with fill value self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == patch_value)) self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0)) self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == patch_value)) self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
keras-cv/keras_cv/layers/preprocessing/random_cutout_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_cutout_test.py", "repo_id": "keras-cv", "token_count": 2765 }
10
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest import tensorflow as tf from keras_cv import bounding_box from keras_cv.backend import ops from keras_cv.layers import preprocessing from keras_cv.tests.test_case import TestCase num_classes = 10 class RandomShearTest(TestCase): def test_aggressive_shear_fills_at_least_some_pixels(self): img_shape = (50, 50, 3) xs = tf.stack( [2 * tf.ones(img_shape), tf.ones(img_shape)], axis=0, ) ys_segmentation_masks = tf.stack( [2 * tf.ones(img_shape), tf.ones(img_shape)], axis=0, ) xs = tf.cast(xs, tf.float32) ys_segmentation_masks = tf.cast(ys_segmentation_masks, tf.float32) fill_value = 0.0 layer = preprocessing.RandomShear( x_factor=(3, 3), seed=0, fill_mode="constant", fill_value=fill_value ) xs = layer(xs) ys_segmentation_masks = layer(ys_segmentation_masks) # Some pixels should be replaced with fill value self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == fill_value)) self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0)) self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == fill_value)) self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0)) self.assertTrue( np.any(ops.convert_to_numpy(ys_segmentation_masks[0]) == fill_value) ) self.assertTrue( np.any(ops.convert_to_numpy(ys_segmentation_masks[0]) == 2.0) ) self.assertTrue( np.any(ops.convert_to_numpy(ys_segmentation_masks[1]) == fill_value) ) self.assertTrue( np.any(ops.convert_to_numpy(ys_segmentation_masks[1]) == 1.0) ) def test_return_shapes(self): """test return dict keys and value pairs""" xs = tf.ones((2, 512, 512, 3)) # randomly sample labels ys_labels = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 2) ys_labels = tf.squeeze(ys_labels) ys_labels = tf.one_hot(ys_labels, num_classes) # randomly sample bounding boxes ys_bounding_boxes = { "boxes": tf.ones((2, 3, 4)), "classes": tf.random.uniform((2, 3), 0, 1), } # randomly sample segmentation masks ys_segmentation_masks = tf.ones((2, 512, 512, 3)) layer = preprocessing.RandomShear( x_factor=(0.1, 0.3), y_factor=(0.1, 0.3), seed=0, fill_mode="constant", bounding_box_format="xywh", ) outputs = layer( { "images": xs, "targets": ys_labels, "bounding_boxes": ys_bounding_boxes, "segmentation_masks": ys_segmentation_masks, } ) xs, ys_labels, ys_bounding_boxes, ys_segmentation_masks = ( outputs["images"], outputs["targets"], outputs["bounding_boxes"], outputs["segmentation_masks"], ) ys_bounding_boxes = bounding_box.to_dense(ys_bounding_boxes) self.assertEqual(xs.shape, (2, 512, 512, 3)) self.assertEqual(ys_labels.shape, (2, 10)) self.assertEqual(ys_bounding_boxes["boxes"].shape, (2, 3, 4)) self.assertEqual(ys_bounding_boxes["classes"].shape, (2, 3)) self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3)) def test_single_image_input(self): """test for single image input""" xs = tf.ones((512, 512, 3)) inputs = {"images": xs} layer = preprocessing.RandomShear( x_factor=(3, 3), seed=0, fill_mode="constant", ) outputs = layer(inputs) self.assertEqual(outputs["images"].shape, 
(512, 512, 3)) @pytest.mark.skip(reason="Flaky") def test_area(self): xs = tf.ones((1, 512, 512, 3)) ys = { "boxes": tf.constant( [[[0.3, 0.4, 0.5, 0.6], [0.9, 0.8, 1.0, 1.0]]] ), "classes": tf.constant([2, 3]), } inputs = {"images": xs, "bounding_boxes": ys} layer = preprocessing.RandomShear( x_factor=(0.3, 0.7), y_factor=(0.4, 0.7), seed=0, fill_mode="constant", bounding_box_format="rel_xyxy", ) outputs = layer(inputs) xs, ys_bounding_boxes = ( outputs["images"], outputs["bounding_boxes"]["boxes"], ) new_area = tf.math.multiply( tf.abs( tf.subtract( ys_bounding_boxes[..., 2], ys_bounding_boxes[..., 0] ) ), tf.abs( tf.subtract( ys_bounding_boxes[..., 3], ys_bounding_boxes[..., 1] ) ), ) old_area = tf.math.multiply( tf.abs(tf.subtract(ys["boxes"][..., 2], ys["boxes"][..., 0])), tf.abs(tf.subtract(ys["boxes"][..., 3], ys["boxes"][..., 1])), ) self.assertTrue(tf.math.reduce_all(new_area > old_area)) @pytest.mark.tf_only def test_in_tf_function(self): """test for class works with tf function""" xs = tf.cast( tf.stack( [2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))], axis=0, ), tf.float32, ) layer = preprocessing.RandomShear( x_factor=0.2, y_factor=0.2, bounding_box_format="xywh" ) ys = { "boxes": tf.random.uniform((2, 3, 4), 0, 1), "classes": tf.random.uniform((2, 3), 0, 1), } @tf.function def augment(x, y): return layer({"images": x, "bounding_boxes": y}) outputs = augment(xs, ys) xs = outputs["images"] # None of the individual values should still be close to 1 or 0 self.assertNotAllClose(xs, 1.0) self.assertNotAllClose(xs, 2.0) def test_no_augmentation(self): """test for no image and bbox augmentation when x_factor,y_factor is 0,0""" xs = tf.cast( tf.stack( [2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))], axis=0, ), tf.float32, ) ys = { "boxes": tf.constant( [ [[0.3, 0.4, 0.5, 0.6], [0.9, 0.8, 1.0, 1.0]], [[0.3, 0.4, 0.5, 0.6], [0.9, 0.8, 1.0, 1.0]], ], dtype=tf.float32, ), "classes": tf.constant([[0, 0], [0, 0]], dtype=tf.float32), } layer = preprocessing.RandomShear( x_factor=0, y_factor=0, bounding_box_format="rel_xyxy" ) outputs = layer({"images": xs, "bounding_boxes": ys}) output_xs, output_ys = outputs["images"], outputs["bounding_boxes"] ys = bounding_box.to_dense(ys) output_ys = bounding_box.to_dense(output_ys) self.assertAllEqual(xs, output_xs) self.assertAllEqual(ys["boxes"], output_ys["boxes"]) # TODO re-enable when bounding box augmentation is fixed. 
def DISABLED_test_output_values(self): """test to verify augmented bounding box output coordinate""" xs = tf.cast( tf.stack( [2 * tf.ones((100, 100, 3)), tf.zeros((100, 100, 3))], axis=0, ), tf.float32, ) ys = tf.cast( tf.stack( [ tf.constant( [[10.0, 20.0, 40.0, 50.0], [12.0, 22.0, 42.0, 54.0]] ), tf.constant( [[10.0, 20.0, 40.0, 50.0], [12.0, 22.0, 42.0, 54.0]] ), ], axis=0, ), tf.float32, ) ys = bounding_box.add_class_id(ys) true_ys = tf.cast( tf.stack( [ tf.constant( [ [7.60, 20.43, 39.04, 51.79, 0.0], [9.41, 22.52, 40.94, 55.88, 0.0], ] ), tf.constant( [ [13.68, 22.51, 49.20, 59.05, 0], [16.04, 24.95, 51.940, 63.56, 0], ] ), ], axis=0, ), tf.float32, ) layer = preprocessing.RandomShear( x_factor=0.2, y_factor=0.2, bounding_box_format="xyxy", seed=1 ) outputs = layer({"images": xs, "bounding_boxes": ys}) _, output_ys = outputs["images"], outputs["bounding_boxes"].to_tensor() self.assertAllClose(true_ys, output_ys, rtol=1e-02, atol=1e-03) def test_random_shear_on_batched_images_independently(self): image = tf.random.uniform(shape=(100, 100, 3)) input_images = tf.stack([image, image], axis=0) layer = preprocessing.RandomShear(x_factor=0.5, y_factor=0.5) results = layer(input_images) self.assertNotAllClose(results[0], results[1]) def test_ragged_bounding_box(self): images = tf.random.uniform((2, 16, 16, 3)) random_box = tf.constant( [[[0.1, 0.2, 1, 1], [0.4, 0.6, 1, 1]]], dtype=tf.float32 ) random_box = tf.squeeze(random_box, axis=0) random_box = tf.RaggedTensor.from_row_lengths(random_box, [1, 1]) classes = tf.ragged.constant([[0], [0]]) bounding_boxes = {"boxes": random_box, "classes": classes} inputs = {"images": images, "bounding_boxes": bounding_boxes} layer = preprocessing.RandomShear( x_factor=(0.5, 0.5), y_factor=(0.5, 0.5), bounding_box_format="rel_xywh", ) layer(inputs)
keras-cv/keras_cv/layers/preprocessing/random_shear_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_shear_test.py", "repo_id": "keras-cv", "token_count": 5788 }
11
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from absl.testing import parameterized from keras_cv import layers from keras_cv.backend import keras from keras_cv.tests.test_case import TestCase TEST_CONFIGURATIONS = [ ("AutoContrast", layers.AutoContrast, {"value_range": (0, 255)}), ("ChannelShuffle", layers.ChannelShuffle, {}), ("Equalization", layers.Equalization, {"value_range": (0, 255)}), ( "RandomCropAndResize", layers.RandomCropAndResize, { "target_size": (224, 224), "crop_area_factor": (0.8, 1.0), "aspect_ratio_factor": (3 / 4, 4 / 3), "bounding_box_format": "xywh", "dtype": "float32", }, ), ("Grayscale", layers.Grayscale, {}), ("GridMask", layers.GridMask, {}), ( "Posterization", layers.Posterization, {"bits": 3, "value_range": (0, 255)}, ), ("RandomBrightness", layers.RandomBrightness, {"factor": 0.5}), ( "RandomColorDegeneration", layers.RandomColorDegeneration, {"factor": 0.5}, ), ( "RandomCutout", layers.RandomCutout, {"height_factor": 0.2, "width_factor": 0.2}, ), ( "RandomFlip", layers.RandomFlip, {"mode": "horizontal", "bounding_box_format": "xyxy"}, ), ( "RandomHue", layers.RandomHue, {"factor": 0.5, "value_range": (0, 255)}, ), ( "RandomTranslation", layers.RandomTranslation, { "width_factor": 0.5, "height_factor": 0.5, "bounding_box_format": "xyxy", "dtype": "float32", }, ), ( "RandomChannelShift", layers.RandomChannelShift, {"value_range": (0, 255), "factor": 0.5}, ), ( "RandomColorJitter", layers.RandomColorJitter, { "value_range": (0, 255), "brightness_factor": (-0.2, 0.5), "contrast_factor": (0.5, 0.9), "saturation_factor": (0.5, 0.9), "hue_factor": (0.5, 0.9), "seed": 1, }, ), ( "RandomContrast", layers.RandomContrast, {"value_range": (0, 255), "factor": 0.5}, ), ( "RandomGaussianBlur", layers.RandomGaussianBlur, {"kernel_size": 3, "factor": (0.0, 3.0), "dtype": "float32"}, ), ( "RandomJpegQuality", layers.RandomJpegQuality, {"factor": (75, 100), "dtype": "float32"}, ), ( "RandomRotation", layers.RandomRotation, { "factor": 0.5, "bounding_box_format": "xyxy", "dtype": "float32", }, ), ("RandomSaturation", layers.RandomSaturation, {"factor": 0.5}), ( "RandomSharpness", layers.RandomSharpness, {"factor": 0.5, "value_range": (0, 255)}, ), ( "RandomAspectRatio", layers.RandomAspectRatio, { "factor": (0.9, 1.1), "bounding_box_format": "xyxy", "dtype": "float32", }, ), ( "RandomShear", layers.RandomShear, { "x_factor": 0.3, "x_factor": 0.3, "bounding_box_format": "xyxy", "dtype": "float32", }, ), ("Solarization", layers.Solarization, {"value_range": (0, 255)}), ( "Mosaic", layers.Mosaic, {"bounding_box_format": "xyxy"}, ), ("CutMix", layers.CutMix, {"dtype": "float32"}), ("MixUp", layers.MixUp, {}), ( "Resizing", layers.Resizing, { "height": 224, "width": 224, "bounding_box_format": "xyxy", "pad_to_aspect_ratio": True, "dtype": "float32", }, ), ( "JitteredResize", layers.JitteredResize, { "target_size": (224, 224), "scale_factor": (0.8, 1.25), "bounding_box_format": "xywh", "dtype": "float32", }, ), ( "RandomZoom", 
layers.RandomZoom, {"height_factor": 0.2, "width_factor": 0.5}, ), ( "RandomCrop", layers.RandomCrop, {"height": 224, "width": 224, "bounding_box_format": "xyxy"}, ), ( "Rescaling", layers.Rescaling, { "scale": 1, "offset": 0.5, }, ), ] NO_CPU_FP16_KERNEL_LAYERS = [ layers.RandomSaturation, layers.RandomColorJitter, layers.RandomHue, ] NO_BOUNDING_BOXES_TESTS = [ layers.RandomCutout, layers.RandomZoom, layers.CutMix, ] class WithMixedPrecisionTest(TestCase): @parameterized.named_parameters(*TEST_CONFIGURATIONS) def test_can_run_in_mixed_precision(self, layer_cls, init_args): if not tf.config.list_physical_devices("GPU"): if layer_cls in NO_CPU_FP16_KERNEL_LAYERS: self.skipTest( "There is currently no float16 CPU kernel registered for " "operations `tf.image.adjust_saturation`, and " "`tf.image.adjust_hue`. Skipping." ) keras.mixed_precision.set_global_policy("mixed_float16") img = tf.random.uniform( shape=(3, 512, 512, 3), minval=0, maxval=255, dtype=tf.float32 ) bounding_boxes = { "boxes": tf.convert_to_tensor( [ [ [200, 200, 400, 400], [250, 250, 450, 450], [300, 300, 500, 500], ], # Bounding boxes for image 1 [ [100, 100, 300, 300], [150, 150, 350, 350], [200, 200, 400, 400], ], # Bounding boxes for image 2 [ [300, 300, 500, 500], [350, 350, 550, 550], [400, 400, 600, 600], ], ], # Bounding boxes for image 3 dtype=tf.float32, ), "classes": tf.ones((3, 3), dtype=tf.float32), } inputs = {"images": img} if layer_cls in NO_BOUNDING_BOXES_TESTS: inputs["labels"] = bounding_boxes["classes"] else: inputs["bounding_boxes"] = bounding_boxes layer = layer_cls(**init_args) layer(inputs) @classmethod def tearDownClass(cls) -> None: # Do not affect other tests keras.mixed_precision.set_global_policy("float32") if __name__ == "__main__": tf.test.main()
keras-cv/keras_cv/layers/preprocessing/with_mixed_precision_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/with_mixed_precision_test.py", "repo_id": "keras-cv", "token_count": 3825 }
12
# Copyright 2022 Waymo LLC. # # Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501 import tensorflow as tf from keras_cv.api_export import keras_cv_export from keras_cv.backend import random from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d from keras_cv.point_cloud import coordinate_transform from keras_cv.point_cloud import wrap_angle_radians POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES @keras_cv_export("keras_cv.layers.GlobalRandomRotation") class GlobalRandomRotation(base_augmentation_layer_3d.BaseAugmentationLayer3D): """A preprocessing layer which randomly rotates point clouds and bounding boxes along X, Y and Z axes during training. This layer will randomly rotate the whole scene along the X, Y and Z axes based on a randomly sampled rotation angle between [-max_rotation_angle, max_rotation_angle] (in radians) following a uniform distribution. During inference time, the output will be identical to input. Call the layer with `training=True` to rotate the input. Input shape: point_clouds: 3D (multi frames) float32 Tensor with shape [num of frames, num of points, num of point features]. The first 5 features are [x, y, z, class, range]. bounding_boxes: 3D (multi frames) float32 Tensor with shape [num of frames, num of boxes, num of box features]. Boxes are expected to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py for more details on supported bounding box formats. Output shape: A dictionary of Tensors with the same shape as input Tensors. Arguments: max_rotation_angle_x: A float scalar sets the maximum rotation angle (in radians) along X axis. max_rotation_angle_y: A float scalar sets the maximum rotation angle (in radians) along Y axis. max_rotation_angle_z: A float scalar sets the maximum rotation angle (in radians) along Z axis. 
""" def __init__( self, max_rotation_angle_x=None, max_rotation_angle_y=None, max_rotation_angle_z=None, **kwargs ): super().__init__(**kwargs) max_rotation_angle_x = ( max_rotation_angle_x if max_rotation_angle_x else 0.0 ) max_rotation_angle_y = ( max_rotation_angle_y if max_rotation_angle_y else 0.0 ) max_rotation_angle_z = ( max_rotation_angle_z if max_rotation_angle_z else 0.0 ) if max_rotation_angle_x < 0: raise ValueError("max_rotation_angle_x must be >=0.") if max_rotation_angle_y < 0: raise ValueError("max_rotation_angle_y must be >=0.") if max_rotation_angle_z < 0: raise ValueError("max_rotation_angle_z must be >=0.") self._max_rotation_angle_x = max_rotation_angle_x self._max_rotation_angle_y = max_rotation_angle_y self._max_rotation_angle_z = max_rotation_angle_z def get_config(self): return { "max_rotation_angle_x": self._max_rotation_angle_x, "max_rotation_angle_y": self._max_rotation_angle_y, "max_rotation_angle_z": self._max_rotation_angle_z, } def get_random_transformation(self, **kwargs): random_rotation_x = random.uniform( (), minval=-self._max_rotation_angle_x, maxval=self._max_rotation_angle_x, dtype=self.compute_dtype, seed=self._random_generator, ) random_rotation_y = random.uniform( (), minval=-self._max_rotation_angle_y, maxval=self._max_rotation_angle_y, dtype=self.compute_dtype, seed=self._random_generator, ) random_rotation_z = random.uniform( (), minval=-self._max_rotation_angle_z, maxval=self._max_rotation_angle_z, dtype=self.compute_dtype, seed=self._random_generator, ) return { "pose": tf.stack( [ 0, 0, 0, random_rotation_z, random_rotation_x, random_rotation_y, ], axis=0, ) } def augment_point_clouds_bounding_boxes( self, point_clouds, bounding_boxes, transformation, **kwargs ): pose = transformation["pose"] point_clouds_xyz = coordinate_transform(point_clouds[..., :3], pose) point_clouds = tf.concat( [point_clouds_xyz, point_clouds[..., 3:]], axis=-1 ) bounding_boxes_xyz = coordinate_transform( bounding_boxes[..., : CENTER_XYZ_DXDYDZ_PHI.Z + 1], pose ) bounding_boxes_heading = wrap_angle_radians( tf.expand_dims( bounding_boxes[..., CENTER_XYZ_DXDYDZ_PHI.PHI], axis=-1 ) - pose[3] ) bounding_boxes = tf.concat( [ bounding_boxes_xyz, bounding_boxes[ ..., CENTER_XYZ_DXDYDZ_PHI.DX : CENTER_XYZ_DXDYDZ_PHI.DZ + 1 ], bounding_boxes_heading, bounding_boxes[..., CENTER_XYZ_DXDYDZ_PHI.CLASS :], ], axis=-1, ) return (point_clouds, bounding_boxes)
keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_rotation.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_rotation.py", "repo_id": "keras-cv", "token_count": 2740 }
13
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from keras_cv.layers.vit_layers import PatchingAndEmbedding from keras_cv.tests.test_case import TestCase class ViTLayersTest(TestCase): def test_patching_wrong_patch_size(self): with self.assertRaisesRegexp( ValueError, "The patch_size cannot be a negative number. Received -16", ): PatchingAndEmbedding(project_dim=16, patch_size=-16) def test_patching_wrong_padding(self): with self.assertRaisesRegexp( ValueError, "Padding must be either 'SAME' or 'VALID', but REFLECT was passed.", ): PatchingAndEmbedding( project_dim=16, patch_size=16, padding="REFLECT" ) def test_patch_embedding_return_type_and_shape(self): layer = PatchingAndEmbedding(project_dim=128, patch_size=16) inputs = tf.random.normal([1, 224, 224, 3]) output = layer(inputs) self.assertTrue(isinstance(output, tf.Tensor)) self.assertLen(output, 1) self.assertEquals(output.shape, [1, 197, 128]) def test_patch_embedding_interpolation(self): inputs = np.ones([1, 224, 224, 3]) patch_embedding = PatchingAndEmbedding(project_dim=128, patch_size=16) patch_embedding.build(inputs.shape) positional_embeddings = np.ones([197, 128]) ( output, cls, ) = patch_embedding._PatchingAndEmbedding__interpolate_positional_embeddings( # noqa: E501 positional_embeddings, height=450, width=450, patch_size=12 ) self.assertTrue(isinstance(output, tf.Tensor)) self.assertLen(output, 1) self.assertEquals(output.shape, [1, 1369, 128]) def test_patch_embedding_interpolation_numerical(self): inputs = np.ones([1, 4, 4, 3]) patch_embedding = PatchingAndEmbedding(project_dim=4, patch_size=1) patch_embedding.build(inputs.shape) positional_embeddings = np.ones([17, 4]) ( output, cls_token, ) = patch_embedding._PatchingAndEmbedding__interpolate_positional_embeddings( # noqa: E501 positional_embeddings, height=8, width=8, patch_size=2 ) self.assertTrue( tf.reduce_all(tf.equal(output, np.ones([1, 16, 4]))).numpy() )
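The shape assertions in the test above follow from the patching arithmetic; a short illustrative sketch (shapes assumed): a 224x224 input with `patch_size=16` yields 14 * 14 = 196 patches plus one class token, giving a `(1, 197, project_dim)` sequence.

```python
import tensorflow as tf

from keras_cv.layers.vit_layers import PatchingAndEmbedding

images = tf.random.normal((1, 224, 224, 3))
layer = PatchingAndEmbedding(project_dim=128, patch_size=16)

# 224 / 16 = 14 patches per side -> 14 * 14 + 1 (class token) = 197 tokens.
tokens = layer(images)
print(tokens.shape)  # (1, 197, 128), matching the assertion above
```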
keras-cv/keras_cv/layers/vit_layers_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/vit_layers_test.py", "repo_id": "keras-cv", "token_count": 1241 }
14
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops LARGE_NUM = 1e9 def l2_normalize(x, axis): epsilon = keras.backend.epsilon() power_sum = ops.sum(ops.square(x), axis=axis, keepdims=True) norm = ops.reciprocal(ops.sqrt(ops.maximum(power_sum, epsilon))) return ops.multiply(x, norm) @keras_cv_export("keras_cv.losses.SimCLRLoss") class SimCLRLoss(keras.losses.Loss): """Implements SimCLR Cosine Similarity loss. SimCLR loss is used for contrastive self-supervised learning. Args: temperature: a float value between 0 and 1, used as a scaling factor for cosine similarity. References: - [SimCLR paper](https://arxiv.org/pdf/2002.05709) """ def __init__(self, temperature, **kwargs): super().__init__(**kwargs) self.temperature = temperature def call(self, projections_1, projections_2): """Computes SimCLR loss for a pair of projections in a contrastive learning trainer. Note that unlike most loss functions, this should not be called with y_true and y_pred, but with two unlabeled projections. It can otherwise be treated as a normal loss function. Args: projections_1: a tensor with the output of the first projection model in a contrastive learning trainer projections_2: a tensor with the output of the second projection model in a contrastive learning trainer Returns: A tensor with the SimCLR loss computed from the input projections """ # Normalize the projections projections_1 = l2_normalize(projections_1, axis=1) projections_2 = l2_normalize(projections_2, axis=1) # Produce artificial labels, 1 for each image in the batch. batch_size = ops.shape(projections_1)[0] labels = ops.one_hot(ops.arange(batch_size), batch_size * 2) masks = ops.one_hot(ops.arange(batch_size), batch_size) # Compute logits logits_11 = ( ops.matmul(projections_1, ops.transpose(projections_1)) / self.temperature ) logits_11 = logits_11 - ops.cast(masks * LARGE_NUM, logits_11.dtype) logits_22 = ( ops.matmul(projections_2, ops.transpose(projections_2)) / self.temperature ) logits_22 = logits_22 - ops.cast(masks * LARGE_NUM, logits_22.dtype) logits_12 = ( ops.matmul(projections_1, ops.transpose(projections_2)) / self.temperature ) logits_21 = ( ops.matmul(projections_2, ops.transpose(projections_1)) / self.temperature ) loss_a = keras.losses.categorical_crossentropy( labels, ops.concatenate([logits_12, logits_11], 1), from_logits=True ) loss_b = keras.losses.categorical_crossentropy( labels, ops.concatenate([logits_21, logits_22], 1), from_logits=True ) return loss_a + loss_b def get_config(self): config = super().get_config() config.update({"temperature": self.temperature}) return config
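A minimal usage sketch (batch size and projection width are arbitrary assumptions): as the docstring notes, the loss is called with the two sets of projections from the two augmented views rather than with `(y_true, y_pred)`.

```python
import numpy as np

from keras_cv.losses import SimCLRLoss

projections_1 = np.random.normal(size=(8, 128)).astype("float32")
projections_2 = np.random.normal(size=(8, 128)).astype("float32")

loss_fn = SimCLRLoss(temperature=0.5)
# One contrastive loss value over the batch of 8 positive pairs.
print(loss_fn(projections_1, projections_2))
```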
keras-cv/keras_cv/losses/simclr_loss.py/0
{ "file_path": "keras-cv/keras_cv/losses/simclr_loss.py", "repo_id": "keras-cv", "token_count": 1563 }
15
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_cv.api_export import keras_cv_export from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone import ( CSPDarkNetBackbone, ) from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone_presets import ( backbone_presets, ) from keras_cv.utils.python_utils import classproperty ALIAS_DOCSTRING = """CSPDarkNetBackbone model with {stackwise_channels} channels and {stackwise_depth} depths. Reference: - [YoloV4 Paper](https://arxiv.org/abs/1804.02767) - [CSPNet Paper](https://arxiv.org/pdf/1911.11929) - [YoloX Paper](https://arxiv.org/abs/2107.08430) For transfer learning use cases, make sure to read the [guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/). Args: include_rescaling: bool, whether or not to rescale the inputs. If set to True, inputs will be passed through a `Rescaling(1/255.0)` layer. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, defaults to (None, None, 3). Examples: ```python input_data = tf.ones(shape=(8, 224, 224, 3)) # Randomly initialized backbone model = CSPDarkNet{name}Backbone() output = model(input_data) ``` """ # noqa: E501 @keras_cv_export("keras_cv.losses.CSPDarkNetTinyBackbone") class CSPDarkNetTinyBackbone(CSPDarkNetBackbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return CSPDarkNetBackbone.from_preset("csp_darknet_tiny", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "csp_darknet_tiny_imagenet": copy.deepcopy( backbone_presets["csp_darknet_tiny_imagenet"] ) } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets @keras_cv_export("keras_cv.losses.CSPDarkNetSBackbone") class CSPDarkNetSBackbone(CSPDarkNetBackbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return CSPDarkNetBackbone.from_preset("csp_darknet_s", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return {} @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return {} @keras_cv_export("keras_cv.losses.CSPDarkNetMBackbone") class CSPDarkNetMBackbone(CSPDarkNetBackbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return 
CSPDarkNetBackbone.from_preset("csp_darknet_m", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return {} @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return {} @keras_cv_export("keras_cv.losses.CSPDarkNetLBackbone") class CSPDarkNetLBackbone(CSPDarkNetBackbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return CSPDarkNetBackbone.from_preset("csp_darknet_l", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "csp_darknet_l_imagenet": copy.deepcopy( backbone_presets["csp_darknet_l_imagenet"] ) } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets @keras_cv_export("keras_cv.losses.CSPDarkNetXLBackbone") class CSPDarkNetXLBackbone(CSPDarkNetBackbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return CSPDarkNetBackbone.from_preset("csp_darknet_xl", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return {} @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return {} setattr( CSPDarkNetTinyBackbone, "__doc__", ALIAS_DOCSTRING.format( name="Tiny", stackwise_channels="[48, 96, 192, 384]", stackwise_depth="[1, 3, 3, 1]", ), ) setattr( CSPDarkNetSBackbone, "__doc__", ALIAS_DOCSTRING.format( name="S", stackwise_channels="[64, 128, 256, 512]", stackwise_depth="[1, 3, 3, 1]", ), ) setattr( CSPDarkNetMBackbone, "__doc__", ALIAS_DOCSTRING.format( name="M", stackwise_channels="[96, 192, 384, 768]", stackwise_depth="[2, 6, 6, 2]", ), ) setattr( CSPDarkNetLBackbone, "__doc__", ALIAS_DOCSTRING.format( name="L", stackwise_channels="[128, 256, 512, 1024]", stackwise_depth="[3, 9, 9, 3]", ), ) setattr( CSPDarkNetXLBackbone, "__doc__", ALIAS_DOCSTRING.format( name="XL", stackwise_channels="[170, 340, 680, 1360]", stackwise_depth="[4, 12, 12, 4]", ), )
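A short usage sketch mirroring the ALIAS_DOCSTRING above. The first model is randomly initialized; the second line assumes the preset with weights listed in this file ("csp_darknet_tiny_imagenet") can be loaded through `from_preset`, which will download pretrained weights.

```python
import tensorflow as tf

from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
    CSPDarkNetTinyBackbone,
)

input_data = tf.ones(shape=(8, 224, 224, 3))

# Randomly initialized backbone, as in the alias docstring.
model = CSPDarkNetTinyBackbone()
output = model(input_data)

# Pretrained variant, using the preset name defined in this file.
pretrained = CSPDarkNetTinyBackbone.from_preset("csp_darknet_tiny_imagenet")
```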
keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_aliases.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_aliases.py", "repo_id": "keras-cv", "token_count": 3456 }
16
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MiT backbone model. References: - [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) (CVPR 2021) - [Based on the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/blob/main/deepvision/models/classification/mix_transformer/mit_tf.py) - [Based on the NVlabs' official PyTorch implementation](https://github.com/NVlabs/SegFormer/blob/master/mmseg/models/backbones/mix_transformer.py) - [Inspired by @sithu31296's reimplementation](https://github.com/sithu31296/semantic-segmentation/blob/main/semseg/models/backbones/mit.py) """ # noqa: E501 import copy import numpy as np from keras_cv import layers as cv_layers from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.models import utils from keras_cv.models.backbones.backbone import Backbone from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone_presets import ( # noqa: E501 backbone_presets, ) from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone_presets import ( # noqa: E501 backbone_presets_with_weights, ) from keras_cv.utils.python_utils import classproperty @keras_cv_export("keras_cv.models.MiTBackbone") class MiTBackbone(Backbone): def __init__( self, include_rescaling, depths, input_shape=(224, 224, 3), input_tensor=None, embedding_dims=None, **kwargs, ): """A Keras model implementing the MixTransformer architecture to be used as a backbone for the SegFormer architecture. References: - [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) # noqa: E501 - [Based on the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/tree/main/deepvision/models/classification/mix_transformer) # noqa: E501 Args: include_rescaling: bool, whether to rescale the inputs. If set to `True`, inputs will be passed through a `Rescaling(1/255.0)` layer. depths: the number of transformer encoders to be used per stage in the network embedding_dims: the embedding dims per hierarchical stage, used as the levels of the feature pyramid input_shape: optional shape tuple, defaults to (None, None, 3). input_tensor: optional Keras tensor (i.e. output of `keras.layers.Input()`) to use as image input for the model. 
Examples: Using the class with a `backbone`: ```python import tensorflow as tf import keras_cv images = np.ones(shape=(1, 96, 96, 3)) labels = np.zeros(shape=(1, 96, 96, 1)) backbone = keras_cv.models.MiTBackbone.from_preset("mit_b0_imagenet") # Evaluate model model(images) # Train model model.compile( optimizer="adam", loss=keras.losses.BinaryCrossentropy(from_logits=False), metrics=["accuracy"], ) model.fit(images, labels, epochs=3) ``` """ drop_path_rate = 0.1 dpr = [x for x in np.linspace(0.0, drop_path_rate, sum(depths))] blockwise_num_heads = [1, 2, 5, 8] blockwise_sr_ratios = [8, 4, 2, 1] num_stages = 4 cur = 0 patch_embedding_layers = [] transformer_blocks = [] layer_norms = [] for i in range(num_stages): patch_embed_layer = cv_layers.OverlappingPatchingAndEmbedding( project_dim=embedding_dims[0] if i == 0 else embedding_dims[i], patch_size=7 if i == 0 else 3, stride=4 if i == 0 else 2, name=f"patch_and_embed_{i}", ) patch_embedding_layers.append(patch_embed_layer) transformer_block = [ cv_layers.HierarchicalTransformerEncoder( project_dim=embedding_dims[i], num_heads=blockwise_num_heads[i], sr_ratio=blockwise_sr_ratios[i], drop_prob=dpr[cur + k], name=f"hierarchical_encoder_{i}_{k}", ) for k in range(depths[i]) ] transformer_blocks.append(transformer_block) cur += depths[i] layer_norms.append(keras.layers.LayerNormalization()) inputs = utils.parse_model_inputs(input_shape, input_tensor) x = inputs if include_rescaling: x = keras.layers.Rescaling(scale=1 / 255)(x) pyramid_level_inputs = [] for i in range(num_stages): # Compute new height/width after the `proj` # call in `OverlappingPatchingAndEmbedding` stride = 4 if i == 0 else 2 new_height, new_width = ( int(ops.shape(x)[1] / stride), int(ops.shape(x)[2] / stride), ) x = patch_embedding_layers[i](x) for blk in transformer_blocks[i]: x = blk(x) x = layer_norms[i](x) x = keras.layers.Reshape( (new_height, new_width, -1), name=f"output_level_{i}" )(x) pyramid_level_inputs.append(utils.get_tensor_input_name(x)) super().__init__(inputs=inputs, outputs=x, **kwargs) self.depths = depths self.embedding_dims = embedding_dims self.include_rescaling = include_rescaling self.input_tensor = input_tensor self.pyramid_level_inputs = { f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs) } def get_config(self): config = super().get_config() config.update( { "depths": self.depths, "embedding_dims": self.embedding_dims, "include_rescaling": self.include_rescaling, "input_shape": self.input_shape[1:], "input_tensor": self.input_tensor, } ) return config @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return copy.deepcopy(backbone_presets) @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return copy.deepcopy(backbone_presets_with_weights)
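As a complement to the docstring example, a hedged sketch of using the backbone directly as a hierarchical feature extractor (the preset name is taken from the docstring; output shapes depend on the preset and input size).

```python
import numpy as np

import keras_cv

images = np.ones(shape=(1, 224, 224, 3), dtype="float32")

backbone = keras_cv.models.MiTBackbone.from_preset("mit_b0_imagenet")

# Final-stage feature map of the hierarchical encoder.
features = backbone(images)
print(features.shape)

# Names of the tensors feeding each pyramid level (P1-P4), which a
# SegFormer-style decoder can tap via a feature extractor.
print(backbone.pyramid_level_inputs)
```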
keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone.py", "repo_id": "keras-cv", "token_count": 3240 }
17
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import numpy as np
import pytest

from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetBBackbone
from keras_cv.tests.test_case import TestCase


class TestViTDetBackbone(TestCase):
    @pytest.mark.large
    def test_call(self):
        model = ViTDetBBackbone()
        x = np.ones((1, 1024, 1024, 3))
        x_out = ops.convert_to_numpy(model(x))
        num_parameters = sum(
            np.prod(tuple(x.shape)) for x in model.trainable_variables
        )
        self.assertEqual(x_out.shape, (1, 64, 64, 256))
        self.assertEqual(num_parameters, 89_670_912)

    @pytest.mark.extra_large
    def test_save(self):
        # saving test
        model = ViTDetBBackbone()
        x = np.ones((1, 1024, 1024, 3))
        x_out = ops.convert_to_numpy(model(x))
        path = os.path.join(self.get_temp_dir(), "model.keras")
        model.save(path)
        loaded_model = keras.saving.load_model(path)
        x_out_loaded = ops.convert_to_numpy(loaded_model(x))
        self.assertAllClose(x_out, x_out_loaded)

    @pytest.mark.extra_large
    def test_fit(self):
        model = ViTDetBBackbone()
        x = np.ones((1, 1024, 1024, 3))
        y = np.zeros((1, 64, 64, 256))
        model.compile(optimizer="adam", loss="mse", metrics=["mse"])
        model.fit(x, y, epochs=1)

    def test_pyramid_level_inputs_error(self):
        model = ViTDetBBackbone()
        with self.assertRaises(NotImplementedError, msg="doesn't compute"):
            model.pyramid_level_inputs
keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_test.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_test.py", "repo_id": "keras-cv", "token_count": 895 }
18
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.models.legacy.convmixer import ConvMixer_512_16 from keras_cv.models.legacy.convmixer import ConvMixer_768_32 from keras_cv.models.legacy.convmixer import ConvMixer_1024_16 from keras_cv.models.legacy.convmixer import ConvMixer_1536_20 from keras_cv.models.legacy.convmixer import ConvMixer_1536_24 from keras_cv.models.legacy.convnext import ConvNeXtBase from keras_cv.models.legacy.convnext import ConvNeXtLarge from keras_cv.models.legacy.convnext import ConvNeXtSmall from keras_cv.models.legacy.convnext import ConvNeXtTiny from keras_cv.models.legacy.convnext import ConvNeXtXLarge from keras_cv.models.legacy.darknet import DarkNet21 from keras_cv.models.legacy.darknet import DarkNet53 from keras_cv.models.legacy.mlp_mixer import MLPMixerB16 from keras_cv.models.legacy.mlp_mixer import MLPMixerB32 from keras_cv.models.legacy.mlp_mixer import MLPMixerL16 from keras_cv.models.legacy.object_detection.faster_rcnn.faster_rcnn import ( FasterRCNN, ) from keras_cv.models.legacy.regnet import RegNetX002 from keras_cv.models.legacy.regnet import RegNetX004 from keras_cv.models.legacy.regnet import RegNetX006 from keras_cv.models.legacy.regnet import RegNetX008 from keras_cv.models.legacy.regnet import RegNetX016 from keras_cv.models.legacy.regnet import RegNetX032 from keras_cv.models.legacy.regnet import RegNetX040 from keras_cv.models.legacy.regnet import RegNetX064 from keras_cv.models.legacy.regnet import RegNetX080 from keras_cv.models.legacy.regnet import RegNetX120 from keras_cv.models.legacy.regnet import RegNetX160 from keras_cv.models.legacy.regnet import RegNetX320 from keras_cv.models.legacy.regnet import RegNetY002 from keras_cv.models.legacy.regnet import RegNetY004 from keras_cv.models.legacy.regnet import RegNetY006 from keras_cv.models.legacy.regnet import RegNetY008 from keras_cv.models.legacy.regnet import RegNetY016 from keras_cv.models.legacy.regnet import RegNetY032 from keras_cv.models.legacy.regnet import RegNetY040 from keras_cv.models.legacy.regnet import RegNetY064 from keras_cv.models.legacy.regnet import RegNetY080 from keras_cv.models.legacy.regnet import RegNetY120 from keras_cv.models.legacy.regnet import RegNetY160 from keras_cv.models.legacy.regnet import RegNetY320 from keras_cv.models.legacy.vgg16 import VGG16 from keras_cv.models.legacy.vgg19 import VGG19 from keras_cv.models.legacy.vit import ViTB16 from keras_cv.models.legacy.vit import ViTB32 from keras_cv.models.legacy.vit import ViTH16 from keras_cv.models.legacy.vit import ViTH32 from keras_cv.models.legacy.vit import ViTL16 from keras_cv.models.legacy.vit import ViTL32 from keras_cv.models.legacy.vit import ViTS16 from keras_cv.models.legacy.vit import ViTS32 from keras_cv.models.legacy.vit import ViTTiny16 from keras_cv.models.legacy.vit import ViTTiny32
keras-cv/keras_cv/models/legacy/__init__.py/0
{ "file_path": "keras-cv/keras_cv/models/legacy/__init__.py", "repo_id": "keras-cv", "token_count": 1203 }
19
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras @keras_cv_export("keras_cv.models.retinanet.PredictionHead") class PredictionHead(keras.layers.Layer): """The class/box predictions head. Arguments: output_filters: Number of convolution filters in the final layer. bias_initializer: Bias Initializer for the final convolution layer. Returns: A function representing either the classification or the box regression head depending on `output_filters`. """ def __init__( self, output_filters, bias_initializer, num_conv_layers=3, **kwargs ): super().__init__(**kwargs) self.output_filters = output_filters self.bias_initializer = bias_initializer self.num_conv_layers = num_conv_layers self.conv_layers = [ keras.layers.Conv2D( 256, kernel_size=3, padding="same", kernel_initializer="orthogonal", activation="relu", ) for _ in range(num_conv_layers) ] self.prediction_layer = keras.layers.Conv2D( self.output_filters, kernel_size=3, strides=1, padding="same", kernel_initializer="orthogonal", bias_initializer=self.bias_initializer, ) def call(self, x, training=False): for layer in self.conv_layers: x = layer(x, training=training) x = self.prediction_layer(x, training=training) return x def compute_output_shape(self, input_shape): return tuple(input_shape[:-1]) + (self.output_filters,) def get_config(self): config = { "bias_initializer": keras.initializers.serialize( self.bias_initializer ), "output_filters": self.output_filters, "num_conv_layers": self.num_conv_layers, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): config.update( { "bias_initializer": keras.initializers.deserialize( config["bias_initializer"] ) } ) return super().from_config(config) def build(self, input_shape): self.conv_layers[0].build(input_shape) intermediate_shape = tuple(input_shape[:-1]) + (256,) for conv_layer in self.conv_layers[1:]: conv_layer.build(intermediate_shape) self.prediction_layer.build(intermediate_shape) self.built = True
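An illustrative sketch of the head in isolation (the anchor count, class count, and the focal-loss-style prior bias below are assumptions, not values fixed by this file): with 9 anchors and 20 classes, a classification head emits 9 * 20 = 180 filters per spatial location.

```python
import numpy as np

from keras_cv.backend import keras
from keras_cv.models.object_detection.retinanet.prediction_head import (
    PredictionHead,
)

num_classes, num_anchors = 20, 9

# A commonly used classification-head bias (prior probability of 0.01).
prior_bias = keras.initializers.Constant(-np.log((1 - 0.01) / 0.01))
classification_head = PredictionHead(
    output_filters=num_classes * num_anchors,
    bias_initializer=prior_bias,
)

features = np.ones((2, 32, 32, 256), dtype="float32")  # one FPN level
print(classification_head(features).shape)  # (2, 32, 32, 180)
```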
keras-cv/keras_cv/models/object_detection/retinanet/prediction_head.py/0
{ "file_path": "keras-cv/keras_cv/models/object_detection/retinanet/prediction_head.py", "repo_id": "keras-cv", "token_count": 1424 }
20
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.models import utils from keras_cv.models.backbones.backbone_presets import backbone_presets from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import ( apply_basic_block as resnet_basic_block, ) from keras_cv.models.segmentation.basnet.basnet_presets import basnet_presets from keras_cv.models.segmentation.basnet.basnet_presets import ( presets_no_weights, ) from keras_cv.models.segmentation.basnet.basnet_presets import ( presets_with_weights, ) from keras_cv.models.task import Task from keras_cv.utils.python_utils import classproperty @keras_cv_export( [ "keras_cv.models.BASNet", "keras_cv.models.segmentation.BASNet", ] ) class BASNet(Task): """ A Keras model implementing the BASNet architecture for semantic segmentation. References: - [BASNet: Boundary-Aware Segmentation Network for Mobile and Web Applications](https://arxiv.org/abs/2101.04704) Args: backbone: `keras.Model`. The backbone network for the model that is used as a feature extractor for BASNet prediction encoder. Currently supported backbones are ResNet18 and ResNet34. Default backbone is `keras_cv.models.ResNet34Backbone()` (Note: Do not specify 'input_shape', 'input_tensor', or 'include_rescaling' within the backbone. Please provide these while initializing the 'BASNet' model.) num_classes: int, the number of classes for the segmentation model. input_shape: optional shape tuple, defaults to (None, None, 3). input_tensor: optional Keras tensor (i.e., output of `layers.Input()`) to use as image input for the model. include_rescaling: bool, whether to rescale the inputs. If set to `True`, inputs will be passed through a `Rescaling(1/255.0)` layer. projection_filters: int, number of filters in the convolution layer projecting low-level features from the `backbone`. prediction_heads: (Optional) List of `keras.layers.Layer` defining the prediction module head for the model. If not provided, a default head is created with a Conv2D layer followed by resizing. refinement_head: (Optional) a `keras.layers.Layer` defining the refinement module head for the model. If not provided, a default head is created with a Conv2D layer. Examples: ```python import keras_cv images = np.ones(shape=(1, 288, 288, 3)) labels = np.zeros(shape=(1, 288, 288, 1)) # Note: Do not specify 'input_shape', 'input_tensor', or # 'include_rescaling' within the backbone. 
backbone = keras_cv.models.ResNet34Backbone() model = keras_cv.models.segmentation.BASNet( backbone=backbone, num_classes=1, input_shape=[288, 288, 3], include_rescaling=False ) # Evaluate model output = model(images) pred_labels = output[0] # Train model model.compile( optimizer="adam", loss=keras.losses.BinaryCrossentropy(from_logits=False), metrics=["accuracy"], ) model.fit(images, labels, epochs=3) ``` """ # noqa: E501 def __init__( self, backbone, num_classes, input_shape=(None, None, 3), input_tensor=None, include_rescaling=False, projection_filters=64, prediction_heads=None, refinement_head=None, **kwargs, ): if not isinstance(backbone, keras.layers.Layer) or not isinstance( backbone, keras.Model ): raise ValueError( "Argument `backbone` must be a `keras.layers.Layer` instance" f" or `keras.Model`. Received instead" f" backbone={backbone} (of type {type(backbone)})." ) if backbone.input_shape != (None, None, None, 3): raise ValueError( "Do not specify 'input_shape' or 'input_tensor' within the" " 'BASNet' backbone. \nPlease provide 'input_shape' or" " 'input_tensor' while initializing the 'BASNet' model." ) inputs = utils.parse_model_inputs(input_shape, input_tensor) x = inputs if include_rescaling: x = keras.layers.Rescaling(1 / 255.0)(x) if prediction_heads is None: prediction_heads = [] for size in (1, 2, 4, 8, 16, 32, 32): head_layers = [ keras.layers.Conv2D( num_classes, kernel_size=(3, 3), padding="same" ) ] if size != 1: head_layers.append( keras.layers.UpSampling2D( size=size, interpolation="bilinear" ) ) prediction_heads.append(keras.Sequential(head_layers)) if refinement_head is None: refinement_head = keras.Sequential( [ keras.layers.Conv2D( num_classes, kernel_size=(3, 3), padding="same" ), ] ) # Prediction model. predict_model = basnet_predict( x, backbone, projection_filters, prediction_heads ) # Refinement model. refine_model = basnet_rrm( predict_model, projection_filters, refinement_head ) outputs = refine_model.outputs # Combine outputs. outputs.extend(predict_model.outputs) outputs = [ keras.layers.Activation("sigmoid", dtype="float32")(_) for _ in outputs ] # Activations. 
super().__init__(inputs=inputs, outputs=outputs, **kwargs) self.backbone = backbone self.num_classes = num_classes self.input_tensor = input_tensor self.include_rescaling = include_rescaling self.projection_filters = projection_filters self.prediction_heads = prediction_heads self.refinement_head = refinement_head def get_config(self): return { "backbone": keras.saving.serialize_keras_object(self.backbone), "num_classes": self.num_classes, "input_shape": self.input_shape[1:], "input_tensor": keras.saving.serialize_keras_object( self.input_tensor ), "include_rescaling": self.include_rescaling, "projection_filters": self.projection_filters, "prediction_heads": [ keras.saving.serialize_keras_object(prediction_head) for prediction_head in self.prediction_heads ], "refinement_head": keras.saving.serialize_keras_object( self.refinement_head ), } @classmethod def from_config(cls, config): if "backbone" in config and isinstance(config["backbone"], dict): input_shape = (None, None, 3) if isinstance(config["backbone"]["config"]["input_shape"], list): input_shape = list(input_shape) if config["backbone"]["config"]["input_shape"] != input_shape: config["input_shape"] = config["backbone"]["config"][ "input_shape" ] config["backbone"]["config"]["input_shape"] = input_shape config["backbone"] = keras.layers.deserialize(config["backbone"]) if "input_tensor" in config and isinstance( config["input_tensor"], dict ): config["input_tensor"] = keras.layers.deserialize( config["input_tensor"] ) if "prediction_heads" in config and isinstance( config["prediction_heads"], list ): for i in range(len(config["prediction_heads"])): if isinstance(config["prediction_heads"][i], dict): config["prediction_heads"][i] = keras.layers.deserialize( config["prediction_heads"][i] ) if "refinement_head" in config and isinstance( config["refinement_head"], dict ): config["refinement_head"] = keras.layers.deserialize( config["refinement_head"] ) return super().from_config(config) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" filtered_backbone_presets = copy.deepcopy( { k: v for k, v in backbone_presets.items() if k in ("resnet18", "resnet34") } ) return copy.deepcopy({**filtered_backbone_presets, **basnet_presets}) @classproperty def presets_with_weights(cls): """ Dictionary of preset names and configurations that include weights. """ return copy.deepcopy(presets_with_weights) @classproperty def presets_without_weights(cls): """ Dictionary of preset names and configurations that has no weights. """ return copy.deepcopy(presets_no_weights) @classproperty def backbone_presets(cls): """ Dictionary of preset names and configurations of compatible backbones. """ filtered_backbone_presets = copy.deepcopy( { k: v for k, v in backbone_presets.items() if k in ("resnet18", "resnet34") } ) filtered_presets = copy.deepcopy(filtered_backbone_presets) return filtered_presets def convolution_block(x_input, filters, dilation=1): """ Apply convolution + batch normalization + ReLU activation. Args: x_input: Input keras tensor. filters: int, number of output filters in the convolution. dilation: int, dilation rate for the convolution operation. Defaults to 1. Returns: A tensor with convolution, batch normalization, and ReLU activation applied. """ x = keras.layers.Conv2D( filters, (3, 3), padding="same", dilation_rate=dilation )(x_input) x = keras.layers.BatchNormalization()(x) return keras.layers.Activation("relu")(x) def get_resnet_block(_resnet, block_num): """ Extract and return a specific ResNet block. 
Args: _resnet: `keras.Model`. ResNet model instance. block_num: int, block number to extract. Returns: A Keras Model representing the specified ResNet block. """ extractor_levels = ["P2", "P3", "P4", "P5"] return keras.models.Model( inputs=_resnet.get_layer(f"v2_stack_{block_num}_block1_1_conv").input, outputs=_resnet.get_layer( _resnet.pyramid_level_inputs[extractor_levels[block_num]] ).output, name=f"resnet_block{block_num + 1}", ) def basnet_predict(x_input, backbone, filters, segmentation_heads): """ BASNet Prediction Module. This module outputs a coarse label map by integrating heavy encoder, bridge, and decoder blocks. Args: x_input: Input keras tensor. backbone: `keras.Model`. The backbone network used as a feature extractor for BASNet prediction encoder. filters: int, the number of filters. segmentation_heads: List of `keras.layers.Layer`, A list of Keras layers serving as the segmentation head for prediction module. Returns: A Keras Model that integrates the encoder, bridge, and decoder blocks for coarse label map prediction. """ num_stages = 6 x = x_input # -------------Encoder-------------- x = keras.layers.Conv2D(filters, kernel_size=(3, 3), padding="same")(x) encoder_blocks = [] for i in range(num_stages): if i < 4: # First four stages are adopted from ResNet backbone. x = get_resnet_block(backbone, i)(x) encoder_blocks.append(x) else: # Last 2 stages consist of three basic resnet blocks. x = keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x) for j in range(3): x = resnet_basic_block( x, filters=x.shape[3], conv_shortcut=False, name=f"v1_basic_block_{i + 1}_{j + 1}", ) encoder_blocks.append(x) # -------------Bridge------------- x = convolution_block(x, filters=filters * 8, dilation=2) x = convolution_block(x, filters=filters * 8, dilation=2) x = convolution_block(x, filters=filters * 8, dilation=2) encoder_blocks.append(x) # -------------Decoder------------- decoder_blocks = [] for i in reversed(range(num_stages)): if i != (num_stages - 1): # Except first, scale other decoder stages. x = keras.layers.UpSampling2D(size=2, interpolation="bilinear")(x) x = keras.layers.concatenate([encoder_blocks[i], x], axis=-1) x = convolution_block(x, filters=filters * 8) x = convolution_block(x, filters=filters * 8) x = convolution_block(x, filters=filters * 8) decoder_blocks.append(x) decoder_blocks.reverse() # Change order from last to first decoder stage. decoder_blocks.append(encoder_blocks[-1]) # Copy bridge to decoder. # -------------Side Outputs-------------- decoder_blocks = [ segmentation_head(decoder_block) # Prediction segmentation head. for segmentation_head, decoder_block in zip( segmentation_heads, decoder_blocks ) ] return keras.models.Model(inputs=[x_input], outputs=decoder_blocks) def basnet_rrm(base_model, filters, segmentation_head): """ BASNet Residual Refinement Module (RRM). This module outputs a fine label map by integrating light encoder, bridge, and decoder blocks. Args: base_model: Keras model used as the base or coarse label map. filters: int, the number of filters. segmentation_head: a `keras.layers.Layer`, A Keras layer serving as the segmentation head for refinement module. Returns: A Keras Model that constructs the Residual Refinement Module (RRM). 
""" num_stages = 4 x_input = base_model.output[0] # -------------Encoder-------------- x = keras.layers.Conv2D(filters, kernel_size=(3, 3), padding="same")( x_input ) encoder_blocks = [] for _ in range(num_stages): x = convolution_block(x, filters=filters) encoder_blocks.append(x) x = keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x) # -------------Bridge-------------- x = convolution_block(x, filters=filters) # -------------Decoder-------------- for i in reversed(range(num_stages)): x = keras.layers.UpSampling2D(size=2, interpolation="bilinear")(x) x = keras.layers.concatenate([encoder_blocks[i], x], axis=-1) x = convolution_block(x, filters=filters) x = segmentation_head(x) # Refinement segmentation head. # ------------- refined = coarse + residual x = keras.layers.Add()([x_input, x]) # Add prediction + refinement output return keras.models.Model(inputs=base_model.input, outputs=[x])
keras-cv/keras_cv/models/segmentation/basnet/basnet.py/0
{ "file_path": "keras-cv/keras_cv/models/segmentation/basnet/basnet.py", "repo_id": "keras-cv", "token_count": 7091 }
21
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.layers.vit_det_layers import MLP @keras_cv_export( "keras_cv.layers.MultiHeadAttentionWithDownsampling", package="keras_cv.layers", ) class MultiHeadAttentionWithDownsampling(keras.layers.Layer): """Multi-Head Attention with downsampling. An attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and values. This layer first downscales the features of input queries, keys, and values using a dense layer. Multi-head attention is then performed and the attention map is projected back (upscaled) to the number of input features. Args: num_heads (int): Number of attention heads. key_dim (int): Size of each attention head for query, key, and value. downsample_rate (int, optional): The factor by which to downscale the input features i.e. the input features of size `key_dim` are projected down to `key_dim // downsample_rate`. References: - [Segment Anything paper](https://arxiv.org/abs/2304.02643) - [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything) """ # noqa: E501 def __init__(self, num_heads, key_dim, downsample_rate=1, **kwargs): super().__init__(**kwargs) self.num_heads = num_heads self.key_dim = key_dim self.downsample_rate = downsample_rate self.internal_dims = key_dim // downsample_rate # Downsample self.query_proj = keras.layers.Dense( self.internal_dims * self.num_heads ) self.key_proj = keras.layers.Dense(self.internal_dims * self.num_heads) self.value_proj = keras.layers.Dense( self.internal_dims * self.num_heads ) # Upsample self.out_proj = keras.layers.Dense(self.key_dim * self.num_heads) def build(self, input_shape=None): self.query_proj.build([None, None, self.num_heads * self.key_dim]) self.key_proj.build([None, None, self.num_heads * self.key_dim]) self.value_proj.build([None, None, self.num_heads * self.key_dim]) self.out_proj.build([None, None, self.internal_dims * self.num_heads]) self.built = True def __separate_heads(self, x): shape = ops.shape(x) B, N, C = shape[0], shape[1], shape[2] x = ops.reshape(x, (B, N, self.num_heads, C // self.num_heads)) return ops.transpose(x, axes=(0, 2, 1, 3)) def __recombine_heads(self, x): shape = ops.shape(x) B, N_H, N_T, C_PH = shape[0], shape[1], shape[2], shape[3] x = ops.transpose(x, axes=(0, 2, 1, 3)) return ops.reshape(x, (B, N_T, N_H * C_PH)) def call(self, query, value, key): query = self.query_proj(query) key = self.key_proj(key) value = self.value_proj(value) # Separate into heads query = self.__separate_heads(query) key = self.__separate_heads(key) value = self.__separate_heads(value) # Attention C_PH = ops.shape(query)[-1] out = query @ ops.transpose(key, (0, 1, 3, 2)) out = out / ops.sqrt(ops.cast(C_PH, dtype=self.compute_dtype)) out = ops.softmax(out, axis=-1) # Get output attention_map = out @ value attention_map = 
self.__recombine_heads(attention_map) return self.out_proj(attention_map) def get_config(self): config = super().get_config() config.update( { "num_heads": self.num_heads, "key_dim": self.key_dim, "downsample_rate": self.downsample_rate, } ) return config @keras_cv_export( "keras_cv.layers.TwoWayMultiHeadAttention", package="keras_cv.layers" ) class TwoWayMultiHeadAttention(keras.layers.Layer): """Two-way multi-head attention layer. Args: num_heads (int): Number of attention heads. key_dim (int): Size of each attention head for query, key, and value. mlp_dim (int): Number of hidden dims to use in the mlp block. skip_first_layer_pe (bool): A boolean indicating whether to skip the first layer positional embeddings. attention_downsample_rate (int, optional): The downsample rate to use in the attention layers. Defaults to 2. activation (str, optional): The activation for the mlp block's output layer. Defaults to "relu". References: - [Segment Anything paper](https://arxiv.org/abs/2304.02643) - [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything) """ # noqa: E501 def __init__( self, num_heads, key_dim, mlp_dim, skip_first_layer_pe, attention_downsample_rate=2, activation="relu", **kwargs, ): super().__init__(**kwargs) self.num_heads = num_heads self.key_dim = key_dim self.mlp_dim = mlp_dim self.skip_first_layer_pe = skip_first_layer_pe self.attention_downsample_rate = attention_downsample_rate self.activation = activation self.self_attention = MultiHeadAttentionWithDownsampling( num_heads=num_heads, key_dim=key_dim ) self.layer_norm1 = keras.layers.LayerNormalization(epsilon=1e-5) self.cross_attention_token_to_image = ( MultiHeadAttentionWithDownsampling( num_heads=num_heads, key_dim=key_dim, downsample_rate=attention_downsample_rate, ) ) self.layer_norm2 = keras.layers.LayerNormalization(epsilon=1e-5) self.mlp_block = MLP( mlp_dim, key_dim * num_heads, num_layers=2, activation=activation, ) self.layer_norm3 = keras.layers.LayerNormalization(epsilon=1e-5) self.cross_attention_image_to_token = ( MultiHeadAttentionWithDownsampling( num_heads=num_heads, key_dim=key_dim, downsample_rate=attention_downsample_rate, ) ) self.layer_norm4 = keras.layers.LayerNormalization(epsilon=1e-5) def build(self, input_shape=None): self.self_attention.build() self.layer_norm1.build([None, None, self.num_heads * self.key_dim]) self.cross_attention_token_to_image.build() self.layer_norm2.build([None, None, self.num_heads * self.key_dim]) self.mlp_block.build([None, None, self.num_heads * self.key_dim]) self.layer_norm3.build([None, None, self.num_heads * self.key_dim]) self.cross_attention_image_to_token.build() self.layer_norm4.build([None, None, self.num_heads * self.key_dim]) self.built = True def call(self, queries, keys, query_pe, key_pe): if self.skip_first_layer_pe: queries = self.self_attention( query=queries, value=queries, key=queries ) else: queries_with_pe = queries + query_pe attention_map = self.self_attention( query=queries_with_pe, key=queries_with_pe, value=queries ) queries = queries + attention_map queries = self.layer_norm1(queries) queries_with_pe = queries + query_pe keys_with_pe = keys + key_pe attention_map = self.cross_attention_token_to_image( query=queries_with_pe, key=keys_with_pe, value=keys ) queries = queries + attention_map queries = self.layer_norm2(queries) mlp_out = self.mlp_block(queries) queries = queries + mlp_out queries = self.layer_norm3(queries) queries_with_pe = queries + query_pe keys_with_pe = keys + key_pe attention_map = 
self.cross_attention_image_to_token( query=keys_with_pe, key=queries_with_pe, value=queries ) keys = keys + attention_map keys = self.layer_norm4(keys) return queries, keys def get_config(self): config = super().get_config() config.update( { "num_heads": self.num_heads, "key_dim": self.key_dim, "mlp_dim": self.mlp_dim, "skip_first_layer_pe": self.skip_first_layer_pe, "attention_downsample_rate": self.attention_downsample_rate, "activation": self.activation, } ) return config @keras_cv_export( "keras_cv.layers.RandomFrequencyPositionalEmbeddings", package="keras_cv.layers", ) class RandomFrequencyPositionalEmbeddings(keras.layers.Layer): """Positional encoding using random spatial frequencies. This layer maps coordinates/points in 2D space to positional encodings using random spatial frequencies. Args: num_positional_features (int): Number of positional features in the output. scale (float): The standard deviation of the random frequencies. References: - [Segment Anything paper](https://arxiv.org/abs/2304.02643) - [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything) """ # noqa: E501 def __init__(self, num_positional_features, scale, **kwargs): super().__init__(**kwargs) self.num_positional_features = num_positional_features self.scale = scale self.positional_encoding_gaussian_matrix = self.add_weight( name="positional_encoding_gaussian_matrix", shape=(2, self.num_positional_features), dtype=self.variable_dtype, trainable=False, initializer=keras.initializers.get("normal"), ) def build(self, input_shape=None): self.built = True def __positional_encodings(self, coords): coords = coords * 2 - 1 coords = coords @ ops.cast( self.positional_encoding_gaussian_matrix, dtype=self.compute_dtype ) coords = coords * (2 * math.pi) return ops.concatenate([ops.sin(coords), ops.cos(coords)], axis=-1) def call(self, size): return self.encode_image(size) def encode_image(self, size): """Generate a positional encoding for an image of any given size. Args: size (tuple[int, int]): The size of the image. Returns: tensor: Positional encoding of the image. """ H, W = size grid = ops.ones(shape=(H, W), dtype=self.compute_dtype) y_embed = ops.cumsum(grid, axis=0) - 0.5 x_embed = ops.cumsum(grid, axis=1) - 0.5 y_embed = y_embed / ops.cast(H, self.compute_dtype) x_embed = x_embed / ops.cast(W, self.compute_dtype) return self.__positional_encodings( ops.stack([x_embed, y_embed], axis=-1) ) def encode_coordinates(self, coords_input, image_size): """Positionally encode points that are not normalized to `[0, 1]`. Args: coords_input (tensor): 2D coordinates/points to map. image_size (tuple[int, int]): Height and width of the image being prompted. Returns: tensor: Positional encodings of the normalized coordinates. """ coords_normalized = ops.stack( [ coords_input[..., 0] / image_size[1], coords_input[..., 1] / image_size[0], ], axis=-1, ) return self.__positional_encodings(coords_normalized) def get_config(self): config = super().get_config() config.update( { "num_positional_features": self.num_positional_features, "scale": self.scale, } ) return config
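A hedged sketch of the downsampling attention layer defined above (head count, `key_dim`, and token counts are arbitrary assumptions): inputs carry `num_heads * key_dim` features, the internal projections use `key_dim // downsample_rate` features per head, and the output is projected back to `num_heads * key_dim`.

```python
import numpy as np

from keras_cv.models.segmentation.segment_anything.sam_layers import (
    MultiHeadAttentionWithDownsampling,
)

num_heads, key_dim = 8, 32  # feature size = 8 * 32 = 256
layer = MultiHeadAttentionWithDownsampling(
    num_heads=num_heads, key_dim=key_dim, downsample_rate=2
)

queries = np.ones((1, 5, num_heads * key_dim), dtype="float32")
keys = np.ones((1, 4096, num_heads * key_dim), dtype="float32")
values = np.ones((1, 4096, num_heads * key_dim), dtype="float32")

# Attention runs in the downsampled 16-dim-per-head space and the result is
# projected back up, so the output keeps the 256-dim feature size.
out = layer(query=queries, value=values, key=keys)
print(out.shape)  # (1, 5, 256)
```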
keras-cv/keras_cv/models/segmentation/segment_anything/sam_layers.py/0
{ "file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_layers.py", "repo_id": "keras-cv", "token_count": 5681 }
22
<jupyter_start><jupyter_code>!pip install --upgrade tensorflow !pip install "git+https://github.com/DavidLandup0/keras-cv@vit" import math import sys import os import pandas as pd import tensorflow as tf import keras_cv import matplotlib.pyplot as plt import numpy as np config_names = { # JAX name : KCV name, layer_nums "Ti/16": ("ViTTiny16", 12), "S/16": ("ViTS16", 12), "B/16": ("ViTB16", 12), "L/16": ("ViTL16", 24), "S/32": ("ViTS32", 12), "B/32": ("ViTB32", 12), } # Choose model to convert model_to_convert = list(config_names.items())[0] model_to_convert model = eval( f"keras_cv.models.{model_to_convert[1][0]}(include_rescaling=False, include_top=True, num_classes=1000, weights=None, input_shape=(224, 224, 3))" ) with tf.io.gfile.GFile("gs://vit_models/augreg/index.csv") as f: df = pd.read_csv(f) df.head() model_df = df.query( f'ds=="i21k" & adapt_resolution==224 & adapt_ds=="imagenet2012" & name=="{model_to_convert[0]}"' ).sort_values("adapt_final_test", ascending=False) model_df.head() best_model_i1k_checkpoint = str(model_df.iloc[0]["adapt_filename"]) model_df.iloc[0]["adapt_filename"], model_df.iloc[0]["adapt_final_test"] filename = best_model_i1k_checkpoint path = f"gs://vit_models/augreg/{filename}.npz" print(f"{tf.io.gfile.stat(path).length / 1024 / 1024:.1f} MiB - {path}") local_path = path.split("//")[-1].split("/")[-1] local_path !gsutil cp {path} . with open(local_path, "rb") as f: params_jax = np.load(f) params_jax = dict(zip(params_jax.keys(), params_jax.values())) from pprint import pformat print(pformat(list(params_jax.keys()))) jax_params_to_kcv_params = { "Transformer/posembed_input/pos_embedding": "patch_embedding/embedding/embeddings", "embedding/bias": "patch_embedding_1/dense_26/bias", "embedding/kernel": "patch_embedding_1/dense_26/kernel", "cls": "patch_embedding_1/class_token", "Transformer/encoderblock_0/LayerNorm_0/scale": "transformer_encoder_12/layer_normalization_25/gamma", "Transformer/encoderblock_0/LayerNorm_0/bias": "transformer_encoder_12/layer_normalization_25/beta", "Transformer/encoderblock_0/LayerNorm_2/scale": "transformer_encoder_12/layer_normalization_26/gamma", "Transformer/encoderblock_0/LayerNorm_2/bias": "transformer_encoder_12/layer_normalization_26/beta", "Transformer/encoderblock_0/MultiHeadDotProductAttention_1/query/kernel": "transformer_encoder_12/multi_head_attention_12/query/kernel", "Transformer/encoderblock_0/MultiHeadDotProductAttention_1/query/bias": "transformer_encoder_12/multi_head_attention_12/query/bias", "Transformer/encoderblock_0/MultiHeadDotProductAttention_1/key/kernel": "transformer_encoder_12/multi_head_attention_12/key/kernel", "Transformer/encoderblock_0/MultiHeadDotProductAttention_1/key/bias": "transformer_encoder_12/multi_head_attention_12/key/bias", "Transformer/encoderblock_0/MultiHeadDotProductAttention_1/value/kernel": "transformer_encoder_12/multi_head_attention_12/value/kernel", "Transformer/encoderblock_0/MultiHeadDotProductAttention_1/value/bias": "transformer_encoder_12/multi_head_attention_12/value/bias", "Transformer/encoderblock_0/MultiHeadDotProductAttention_1/out/kernel": "transformer_encoder_12/multi_head_attention_12/attention_output/kernel", "Transformer/encoderblock_0/MultiHeadDotProductAttention_1/out/bias": "transformer_encoder_12/multi_head_attention_12/attention_output/bias", "Transformer/encoderblock_0/MlpBlock_3/Dense_0/kernel": "transformer_encoder_12/dense_27/kernel", "Transformer/encoderblock_0/MlpBlock_3/Dense_0/bias": "transformer_encoder_12/dense_27/bias", 
"Transformer/encoderblock_0/MlpBlock_3/Dense_1/kernel": "transformer_encoder_12/dense_28/kernel", "Transformer/encoderblock_0/MlpBlock_3/Dense_1/bias": "transformer_encoder_12/dense_28/bias", # ... other transformer blocks "Transformer/encoder_norm/scale": "layer_normalization_49/gamma", "Transformer/encoder_norm/bias": "layer_normalization_49/beta", } model.layers model.summary() # Check shapes for the class token and embedding layers print(params_jax["cls"].shape) print(params_jax["embedding/kernel"].shape) print(params_jax["embedding/bias"].shape) print(params_jax["Transformer/posembed_input/pos_embedding"].shape) for w in model.layers[1].weights: print(w.name, w.shape) # Copy PatchingAndEmbedding layer model.layers[1].weights[0].assign(tf.Variable(params_jax["cls"])) model.layers[1].weights[1].assign(tf.Variable(params_jax["embedding/kernel"])) model.layers[1].weights[2].assign(tf.Variable(params_jax["embedding/bias"])) model.layers[1].weights[3].assign( tf.Variable( params_jax["Transformer/posembed_input/pos_embedding"].squeeze() ) ) # Check transformer block shapes between JAX and KCV print(params_jax["Transformer/encoderblock_4/LayerNorm_0/scale"].shape) print(params_jax["Transformer/encoderblock_4/LayerNorm_0/bias"].shape) print(params_jax["Transformer/encoderblock_4/LayerNorm_2/scale"].shape) print(params_jax[f"Transformer/encoderblock_4/LayerNorm_2/bias"].shape) print( params_jax[ f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/query/kernel" ].shape ) print( params_jax[ f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/query/bias" ].shape ) print( params_jax[ f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/key/kernel" ].shape ) print( params_jax[ f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/key/bias" ].shape ) print( params_jax[ f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/value/kernel" ].shape ) print( params_jax[ f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/value/bias" ].shape ) print( params_jax[ f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/out/kernel" ].shape ) print( params_jax[ f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/out/bias" ].shape ) print(params_jax[f"Transformer/encoderblock_4/MlpBlock_3/Dense_0/kernel"].shape) print(params_jax[f"Transformer/encoderblock_4/MlpBlock_3/Dense_0/bias"].shape) print(params_jax[f"Transformer/encoderblock_4/MlpBlock_3/Dense_1/kernel"].shape) print(params_jax[f"Transformer/encoderblock_4/MlpBlock_3/Dense_1/bias"].shape) for w in model.layers[4].weights: print(w.name, w.shape) # Copy Transformer Encoders for i in range(model_to_convert[1][1]): model.layers[3 + i].weights[0].assign( tf.Variable( params_jax[f"Transformer/encoderblock_{i}/LayerNorm_0/scale"] ) ) model.layers[3 + i].weights[1].assign( tf.Variable( params_jax[f"Transformer/encoderblock_{i}/LayerNorm_0/bias"] ) ) model.layers[3 + i].weights[2].assign( tf.Variable( params_jax[f"Transformer/encoderblock_{i}/LayerNorm_2/scale"] ) ) model.layers[3 + i].weights[3].assign( tf.Variable( params_jax[f"Transformer/encoderblock_{i}/LayerNorm_2/bias"] ) ) model.layers[3 + i].weights[4].assign( tf.Variable( params_jax[ f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/query/kernel" ] ) ) model.layers[3 + i].weights[5].assign( tf.Variable( params_jax[ f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/query/bias" ] ) ) model.layers[3 + i].weights[6].assign( tf.Variable( params_jax[ f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/key/kernel" ] ) ) 
model.layers[3 + i].weights[7].assign( tf.Variable( params_jax[ f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/key/bias" ] ) ) model.layers[3 + i].weights[8].assign( tf.Variable( params_jax[ f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/value/kernel" ] ) ) model.layers[3 + i].weights[9].assign( tf.Variable( params_jax[ f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/value/bias" ] ) ) model.layers[3 + i].weights[10].assign( tf.Variable( params_jax[ f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/out/kernel" ] ) ) model.layers[3 + i].weights[11].assign( tf.Variable( params_jax[ f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/out/bias" ].reshape(model.layers[3 + i].weights[11].shape) ) ) model.layers[3 + i].weights[12].assign( tf.Variable( params_jax[ f"Transformer/encoderblock_{i}/MlpBlock_3/Dense_0/kernel" ] ) ) model.layers[3 + i].weights[13].assign( tf.Variable( params_jax[f"Transformer/encoderblock_{i}/MlpBlock_3/Dense_0/bias"] ) ) model.layers[3 + i].weights[14].assign( tf.Variable( params_jax[ f"Transformer/encoderblock_{i}/MlpBlock_3/Dense_1/kernel" ] ) ) model.layers[3 + i].weights[15].assign( tf.Variable( params_jax[f"Transformer/encoderblock_{i}/MlpBlock_3/Dense_1/bias"] ) ) print(params_jax["Transformer/encoder_norm/scale"].shape) print(params_jax["Transformer/encoder_norm/bias"].shape) for w in model.layers[15].weights: print(w.name, w.shape) # Copy layer norm before class head model.layers[15].weights[0].assign( tf.Variable(params_jax["Transformer/encoder_norm/scale"]) ) model.layers[15].weights[1].assign( tf.Variable(params_jax["Transformer/encoder_norm/bias"]) ) print(params_jax["head/kernel"].shape) print(params_jax["head/bias"].shape) for w in model.layers[17].weights: print(w.name, w.shape) # Copy haed kernel and bias model.layers[17].weights[0].assign(tf.Variable(params_jax["head/kernel"])) model.layers[17].weights[1].assign(tf.Variable(params_jax["head/bias"])) import matplotlib.pyplot as plt import cv2 import numpy as np import PIL import urllib def url_to_array(url): req = urllib.request.urlopen(url) arr = np.array(bytearray(req.read()), dtype=np.int8) arr = cv2.imdecode(arr, -1) arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB) arr = cv2.resize(arr, (224, 224)) return arr def preprocess_image(image, label): image_resized = tf.image.resize(image, (224, 224)) image_resized = tf.cast(image_resized, tf.float32) image_resized = (image_resized - 127.5) / 127.5 return image_resized, label cat = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Cat_November_2010-1a.jpg/1200px-Cat_November_2010-1a.jpg" cat_img = url_to_array(cat) cat_img, _ = preprocess_image(cat_img, None) cat_img = tf.expand_dims(cat_img, 0) !wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt -O ilsvrc2012_wordnet_lemmas.txt with open("ilsvrc2012_wordnet_lemmas.txt", "r") as f: lines = f.readlines() imagenet_int_to_str = [line.rstrip() for line in lines] predictions = model.predict(cat_img) top_5 = tf.math.top_k(predictions, k=5, sorted=False) top_5 pred = np.argmax(predictions) imagenet_int_to_str[int(pred)] dog_url = "https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/dog-puppy-on-garden-royalty-free-image-1586966191.jpg?crop=1.00xw:0.669xh;0,0.190xh&resize=640:*" dog_img = url_to_array(dog_url) dog_img, _ = preprocess_image(dog_img, None) dog_img = tf.expand_dims(dog_img, 0) predictions = model.predict(dog_img) pred = np.argmax(predictions) imagenet_int_to_str[int(pred)] model.compile( "adam", 
"sparse_categorical_crossentropy", metrics=["accuracy", keras.metrics.SparseTopKCategoricalAccuracy(5)], ) import tensorflow_datasets as tfds (test_set), info = tfds.load( "imagenet_v2", split=["test"], as_supervised=True, with_info=True ) test_set = ( test_set[0] .shuffle(len(test_set[0])) .map(preprocess_image) .batch(32) .prefetch(tf.data.AUTOTUNE) ) for entry, label in test_set.take(1): print(label) model.evaluate(test_set) model.save(f"{model_to_convert[1][0]}.h5")<jupyter_output><empty_output>
keras-cv/keras_cv/tools/checkpoint_conversion/ViT_weight_conversion.ipynb/0
{ "file_path": "keras-cv/keras_cv/tools/checkpoint_conversion/ViT_weight_conversion.ipynb", "repo_id": "keras-cv", "token_count": 5612 }
23
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import inspect import json import os from keras_cv.backend import keras try: import kagglehub except ImportError: kagglehub = None KAGGLE_PREFIX = "kaggle://" GS_PREFIX = "gs://" def get_file(preset, path): """Download a preset file in necessary and return the local path.""" if not isinstance(preset, str): raise ValueError( f"A preset identifier must be a string. Received: preset={preset}" ) if preset.startswith(KAGGLE_PREFIX): if kagglehub is None: raise ImportError( "`from_preset()` requires the `kagglehub` package. " "Please install with `pip install kagglehub`." ) # Insert the kaggle framework into the handle. kaggle_handle = preset.removeprefix(KAGGLE_PREFIX) num_segments = len(kaggle_handle.split("/")) if num_segments not in (4, 5): raise ValueError( "Unexpected kaggle preset handle. Kaggle model handles " "should have the form " "kaggle://{org}/{model}/keras/{variant}[/{version}]. " "For example, " "'kaggle://keras/retinanet/keras/retinanet_base_en'. " f"Received: preset={preset}" ) return kagglehub.model_download(kaggle_handle, path) elif preset.startswith(GS_PREFIX): url = os.path.join(preset, path) url = url.replace(GS_PREFIX, "https://storage.googleapis.com/") subdir = preset.replace(GS_PREFIX, "gs_") subdir = subdir.replace("/", "_").replace("-", "_") filename = os.path.basename(path) subdir = os.path.join(subdir, os.path.dirname(path)) return keras.utils.get_file( filename, url, cache_subdir=os.path.join("models", subdir), ) elif os.path.exists(preset): # Assume a local filepath. return os.path.join(preset, path) else: raise ValueError( "Unknown preset identifier. A preset must be a one of:\n" "1) a built in preset identifier like `'mobilenet_v3_small'`\n" "2) a Kaggle Models handle like `'kaggle://keras/mobilenetv3/keras/mobilenet_v3_small'`\n" # noqa: E501 "3) a path to a local preset directory like `'./mobilenet_v3_small`\n" # noqa: E501 "Use `print(cls.presets.keys())` to view all built-in presets for " "API symbol `cls`.\n" f"Received: preset='{preset}'" ) def recursive_pop(config, key): """Remove a key from a nested config object""" config.pop(key, None) for value in config.values(): if isinstance(value, dict): recursive_pop(value, key) if isinstance(value, list): for v in value: if isinstance(v, dict): recursive_pop(v, key) def save_to_preset( layer, preset, save_weights=True, config_filename="config.json", weights_filename="model.weights.h5", ): """Save a KerasCV layer to a preset directory.""" os.makedirs(preset, exist_ok=True) # Optionally save weights. save_weights = save_weights and hasattr(layer, "save_weights") if save_weights: weights_path = os.path.join(preset, weights_filename) layer.save_weights(weights_path) # Save a serialized Keras object. config_path = os.path.join(preset, config_filename) config = keras.saving.serialize_keras_object(layer) # Include references to weights. 
config["weights"] = weights_filename if save_weights else None recursive_pop(config, "compile_config") recursive_pop(config, "build_config") with open(config_path, "w") as config_file: config_file.write(json.dumps(config, indent=4)) from keras_cv import __version__ as keras_cv_version keras_version = keras.version() if hasattr(keras, "version") else None # Save any associated metadata. if config_filename == "config.json": metadata = { "keras_version": keras_version, "keras_cv_version": keras_cv_version, "parameter_count": layer.count_params(), "date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"), } metadata_path = os.path.join(preset, "metadata.json") with open(metadata_path, "w") as metadata_file: metadata_file.write(json.dumps(metadata, indent=4)) def load_from_preset( preset, load_weights=None, input_shape=None, config_file="config.json", config_overrides={}, ): """Load a KerasCV layer to a preset directory.""" # Load a serialized Keras object. config_path = get_file(preset, config_file) with open(config_path) as config_file: config = json.load(config_file) config["config"] = {**config["config"], **config_overrides} layer = keras.saving.deserialize_keras_object(config) if input_shape is not None: layer.build(input_shape) # Check load_weights flag does not violate preset config. if load_weights is True and config["weights"] is None: raise ValueError( f"The specified preset `{preset}` does not include weights. " "Please remove the `load_weights` flag when calling " "`from_preset()` on this preset." ) # Default to loading weights if available. if load_weights is not False and config["weights"] is not None: weights_path = get_file(preset, config["weights"]) if hasattr(layer, "_layer_checkpoint_dependencies"): legacy_load_weights(layer, weights_path) else: layer.load_weights(weights_path) return layer def check_preset_class( preset, classes, config_file="config.json", ): """Validate a preset is being loaded on the correct class.""" config_path = get_file(preset, config_file) try: with open(config_path) as config_file: config = json.load(config_file) except: raise ValueError( f"The specified preset `{preset}` is unknown. " "Please check documentation to ensure the correct preset " "handle is being used." ) cls = keras.saving.get_registered_object(config["registered_name"]) if not isinstance(classes, (tuple, list)): classes = (classes,) # Subclass checking and alias checking if not any(issubclass(cls, obj) for obj in classes) and not any( issubclass(alias, cls) for alias in classes ): raise ValueError( f"Unexpected class in preset `'{preset}'`. " "When calling `from_preset()` on a class object, the preset class " f"much match allowed classes. Allowed classes are `{classes}`. " f"Received: `{cls}`." ) return cls def legacy_load_weights(layer, weights_path): # Hacky fix for TensorFlow 2.13 and 2.14 when loading a `.weights.h5` file. # We find the `Functional` class, and temporarily remove the # `_layer_checkpoint_dependencies` property, which on older version of # TensorFlow complete broke the variable paths for functional models. functional_cls = None for cls in inspect.getmro(layer.__class__): if cls.__name__ == "Functional": functional_cls = cls property = functional_cls._layer_checkpoint_dependencies functional_cls._layer_checkpoint_dependencies = {} layer.load_weights(weights_path) functional_cls._layer_checkpoint_dependencies = property
keras-cv/keras_cv/utils/preset_utils.py/0
{ "file_path": "keras-cv/keras_cv/utils/preset_utils.py", "repo_id": "keras-cv", "token_count": 3378 }
24
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from keras_cv import utils from keras_cv.api_export import keras_cv_export from keras_cv.utils import assert_matplotlib_installed from keras_cv.visualization.plot_image_gallery import plot_image_gallery def reshape_masks(segmentation_masks): rank = len(segmentation_masks.shape) if rank == 3: # (B, H, W) return segmentation_masks[..., np.newaxis] elif rank == 4: # (B, H, W, num_channels) OR (B, H, W, 1) if segmentation_masks.shape[-1] == 1: # Repeat the masks 3 times in order to build 3 channel # segmentation masks. return segmentation_masks.repeat(repeats=3, axis=-1) else: return np.argmax(segmentation_masks, axis=-1).repeat( repeats=3, axis=-1 ) def transform_segmentation_masks(segmentation_masks, num_classes, value_range): segmentation_masks = utils.to_numpy(segmentation_masks) segmentation_masks = reshape_masks(segmentation_masks=segmentation_masks) # Interpolate the segmentation masks from the range of (0, num_classes) # to the value range provided. segmentation_masks = utils.transform_value_range( segmentation_masks, original_range=(0, num_classes), target_range=value_range, ) return segmentation_masks @keras_cv_export("keras_cv.visualization.plot_segmentation_mask_gallery") def plot_segmentation_mask_gallery( images, value_range, num_classes, y_true=None, y_pred=None, rows=3, cols=3, **kwargs ): """Plots a gallery of images with corresponding segmentation masks. Args: images: a Tensor or NumPy array containing images to show in the gallery. The images should be batched and of shape (B, H, W, C). value_range: value range of the images. Common examples include `(0, 255)` and `(0, 1)`. num_classes: number of segmentation classes. y_true: (Optional) a Tensor or NumPy array representing the ground truth segmentation masks. The ground truth segmentation maps should be batched. y_pred: (Optional) a Tensor or NumPy array representing the predicted segmentation masks. The predicted segmentation masks should be batched. kwargs: keyword arguments to propagate to `keras_cv.visualization.plot_image_gallery()`. Usage: ```python train_ds = tfds.load( "oxford_iiit_pet", split="train", with_info=False, shuffle_files=True ) def unpackage_tfds_inputs(inputs): image = inputs["image"] segmentation_mask = inputs["segmentation_mask"] return image, segmentation_mask train_ds = train_ds.map(unpackage_tfds_inputs).ragged_batch(16) images, segmentation_masks = next(iter(train_ds.take(1))) keras_cv.visualization.plot_segmentation_mask_gallery( images, value_range=(0, 255), num_classes=3, # The number of classes for the oxford iiit pet dataset y_true=segmentation_masks, y_pred=None, scale=3, rows=2, cols=2, ) ``` ![Example segmentation mask gallery](https://i.imgur.com/aRkmJ1Q.png) """ assert_matplotlib_installed("plot_segmentation_mask_gallery") plotted_images = utils.to_numpy(images) # Initialize a list to collect the segmentation masks that will be # concatenated to the images for visualization. 
masks_to_concatenate = [plotted_images] if y_true is not None: plotted_y_true = transform_segmentation_masks( segmentation_masks=y_true, num_classes=num_classes, value_range=value_range, ) masks_to_concatenate.append(plotted_y_true) if y_pred is not None: plotted_y_pred = transform_segmentation_masks( segmentation_masks=y_pred, num_classes=num_classes, value_range=value_range, ) masks_to_concatenate.append(plotted_y_pred) # Concatenate the images and the masks together. plotted_images = np.concatenate(masks_to_concatenate, axis=2) plot_image_gallery( plotted_images, value_range, rows=rows, cols=cols, **kwargs )
keras-cv/keras_cv/visualization/plot_segmentation_mask_gallery.py/0
{ "file_path": "keras-cv/keras_cv/visualization/plot_segmentation_mask_gallery.py", "repo_id": "keras-cv", "token_count": 1954 }
25
import sys import keras from absl import flags import keras_cv flags.DEFINE_string("weights_path", None, "Path of weights to load") flags.DEFINE_string( "output_weights_path", None, "Path of notop weights to store" ) flags.DEFINE_string("model_name", None, "Name of the KerasCV.model") FLAGS = flags.FLAGS FLAGS(sys.argv) if not FLAGS.weights_path.endswith(".h5"): raise ValueError("Weights path must end in .h5") model = eval( f"keras_cv.models.{FLAGS.model_name}(include_rescaling=True, " f"include_top=True, num_classes=1000, weights=FLAGS.weights_path)" ) without_top = keras.models.Model(model.input, model.layers[-3].output) without_top.save_weights(FLAGS.output_weights_path) # Because the usage of keras_cv is in an eval() call, the linter is angry. # We include this to avoid an unused import warning keras_cv.models
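For reference, a hedged, programmatic equivalent of what this script does when driven by its three flags; the model name and `.h5` paths below are placeholders:

```python
# Sketch only: strip the classification head from a full KerasCV classifier
# and save "notop" weights, mirroring the flag-driven script above.
import keras
import keras_cv

model = keras_cv.models.ViTTiny16(
    include_rescaling=True,
    include_top=True,
    num_classes=1000,
    weights="vittiny16_full.h5",  # placeholder for --weights_path
)
# Output of the third-to-last layer, i.e. everything except the top head.
without_top = keras.models.Model(model.input, model.layers[-3].output)
without_top.save_weights("vittiny16_notop.h5")  # placeholder for --output_weights_path
```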
keras-cv/shell/weights/remove_top.py/0
{ "file_path": "keras-cv/shell/weights/remove_top.py", "repo_id": "keras-cv", "token_count": 309 }
26
build_file: "keras-cv/.kokoro/github/ubuntu/gpu/build.sh" action { define_artifacts { regex: "**/sponge_log.log" regex: "**/sponge_log.xml" } } env_vars: { key: "KERAS_BACKEND" value: "tensorflow" } # Set timeout to 60 mins from default 180 mins timeout_mins: 60
keras-cv/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg/0
{ "file_path": "keras-cv/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg", "repo_id": "keras-cv", "token_count": 120 }
0
import time import warnings from unittest.mock import MagicMock import numpy as np import tensorflow as tf from matplotlib import pyplot as plt import keras_cv from keras_cv import bounding_box from keras_cv.layers import RandomShear from keras_cv.layers.preprocessing.base_image_augmentation_layer import ( BaseImageAugmentationLayer, ) from keras_cv.utils import preprocessing # Copied from: # https://github.com/keras-team/keras-cv/blob/cd12204b1f6df37b15359b6adf222b9ef0f67dc8/keras_cv/layers/preprocessing/random_shear.py#L27 class OldRandomShear(BaseImageAugmentationLayer): """A preprocessing layer which randomly shears images during training. This layer will apply random shearings to each image, filling empty space according to `fill_mode`. By default, random shears are only applied during training. At inference time, the layer does nothing. If you need to apply random shear at inference time, set `training` to True when calling the layer. Input pixel values can be of any range and any data type. Input shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format Output shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format Args: x_factor: A tuple of two floats, a single float or a `keras_cv.FactorSampler`. For each augmented image a value is sampled from the provided range. If a float is passed, the range is interpreted as `(0, x_factor)`. Values represent a percentage of the image to shear over. For example, 0.3 shears pixels up to 30% of the way across the image. All provided values should be positive. If `None` is passed, no shear occurs on the X axis. Defaults to `None`. y_factor: A tuple of two floats, a single float or a `keras_cv.FactorSampler`. For each augmented image a value is sampled from the provided range. If a float is passed, the range is interpreted as `(0, y_factor)`. Values represent a percentage of the image to shear over. For example, 0.3 shears pixels up to 30% of the way across the image. All provided values should be positive. If `None` is passed, no shear occurs on the Y axis. Defaults to `None`. interpolation: interpolation method used in the `ImageProjectiveTransformV3` op. Supported values are `"nearest"` and `"bilinear"`. Defaults to `"bilinear"`. fill_mode: fill_mode in the `ImageProjectiveTransformV3` op. Supported values are `"reflect"`, `"wrap"`, `"constant"`, and `"nearest"`. Defaults to `"reflect"`. fill_value: fill_value in the `ImageProjectiveTransformV3` op. A `Tensor` of type `float32`. The value to be filled when fill_mode is constant". Defaults to `0.0`. bounding_box_format: The format of bounding boxes of input dataset. Refer to https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py for more details on supported bounding box formats. seed: Integer. Used to create a random seed. 
""" def __init__( self, x_factor=None, y_factor=None, interpolation="bilinear", fill_mode="reflect", fill_value=0.0, bounding_box_format=None, seed=None, **kwargs, ): super().__init__(seed=seed, **kwargs) if x_factor is not None: self.x_factor = preprocessing.parse_factor( x_factor, max_value=None, param_name="x_factor", seed=seed ) else: self.x_factor = x_factor if y_factor is not None: self.y_factor = preprocessing.parse_factor( y_factor, max_value=None, param_name="y_factor", seed=seed ) else: self.y_factor = y_factor if x_factor is None and y_factor is None: warnings.warn( "RandomShear received both `x_factor=None` and " "`y_factor=None`. As a result, the layer will perform no " "augmentation." ) self.interpolation = interpolation self.fill_mode = fill_mode self.fill_value = fill_value self.seed = seed self.bounding_box_format = bounding_box_format def get_random_transformation(self, **kwargs): x = self._get_shear_amount(self.x_factor) y = self._get_shear_amount(self.y_factor) return (x, y) def _get_shear_amount(self, constraint): if constraint is None: return None invert = preprocessing.random_inversion(self._random_generator) return invert * constraint() def augment_image(self, image, transformation=None, **kwargs): image = tf.expand_dims(image, axis=0) x, y = transformation if x is not None: transform_x = OldRandomShear._format_transform( [1.0, x, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) image = preprocessing.transform( images=image, transforms=transform_x, interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, ) if y is not None: transform_y = OldRandomShear._format_transform( [1.0, 0.0, 0.0, y, 1.0, 0.0, 0.0, 0.0] ) image = preprocessing.transform( images=image, transforms=transform_y, interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, ) return tf.squeeze(image, axis=0) def augment_label(self, label, transformation=None, **kwargs): return label def augment_bounding_boxes( self, bounding_boxes, transformation, image=None, **kwargs ): if self.bounding_box_format is None: raise ValueError( "`RandomShear()` was called with bounding boxes, " "but no `bounding_box_format` was specified in the " "constructor. Please specify a bounding box format in the " "constructor. i.e. 
`RandomShear(bounding_box_format='xyxy')`" ) bounding_boxes = keras_cv.bounding_box.convert_format( bounding_boxes, source=self.bounding_box_format, target="rel_xyxy", images=image, dtype=self.compute_dtype, ) x, y = transformation extended_boxes = self._convert_to_extended_corners_format( bounding_boxes["boxes"] ) if x is not None: extended_boxes = ( self._apply_horizontal_transformation_to_bounding_box( extended_boxes, x ) ) # apply vertical shear if y is not None: extended_boxes = ( self._apply_vertical_transformation_to_bounding_box( extended_boxes, y ) ) boxes = self._convert_to_four_coordinate(extended_boxes, x, y) bounding_boxes = bounding_boxes.copy() bounding_boxes["boxes"] = boxes bounding_boxes = bounding_box.clip_to_image( bounding_boxes, images=image, bounding_box_format="rel_xyxy" ) bounding_boxes = keras_cv.bounding_box.convert_format( bounding_boxes, source="rel_xyxy", target=self.bounding_box_format, images=image, dtype=self.compute_dtype, ) return bounding_boxes def get_config(self): config = super().get_config() config.update( { "x_factor": self.x_factor, "y_factor": self.y_factor, "interpolation": self.interpolation, "fill_mode": self.fill_mode, "fill_value": self.fill_value, "bounding_box_format": self.bounding_box_format, "seed": self.seed, } ) return config @staticmethod def _format_transform(transform): transform = tf.convert_to_tensor(transform, dtype=tf.float32) return transform[tf.newaxis] @staticmethod def _convert_to_four_coordinate(extended_bboxes, x, y): """convert from extended coordinates to 4 coordinates system""" ( top_left_x, top_left_y, bottom_right_x, bottom_right_y, top_right_x, top_right_y, bottom_left_x, bottom_left_y, ) = tf.split(extended_bboxes, 8, axis=1) # choose x1,x2 when x>0 def positive_case_x(): final_x1 = bottom_left_x final_x2 = top_right_x return final_x1, final_x2 # choose x1,x2 when x<0 def negative_case_x(): final_x1 = top_left_x final_x2 = bottom_right_x return final_x1, final_x2 if x is not None: final_x1, final_x2 = tf.cond( tf.less(x, 0), negative_case_x, positive_case_x ) else: final_x1, final_x2 = top_left_x, bottom_right_x # choose y1,y2 when y > 0 def positive_case_y(): final_y1 = top_right_y final_y2 = bottom_left_y return final_y1, final_y2 # choose y1,y2 when y < 0 def negative_case_y(): final_y1 = top_left_y final_y2 = bottom_right_y return final_y1, final_y2 if y is not None: final_y1, final_y2 = tf.cond( tf.less(y, 0), negative_case_y, positive_case_y ) else: final_y1, final_y2 = top_left_y, bottom_right_y return tf.concat( [final_x1, final_y1, final_x2, final_y2], axis=1, ) @staticmethod def _apply_horizontal_transformation_to_bounding_box( extended_bounding_boxes, x ): # create transformation matrix [1,4] matrix = tf.stack([1.0, -x, 0, 1.0], axis=0) # reshape it to [2,2] matrix = tf.reshape(matrix, (2, 2)) # reshape unnormalized bboxes from [N,8] -> [N*4,2] new_bboxes = tf.reshape(extended_bounding_boxes, (-1, 2)) # [[1,x`],[y`,1]]*[x,y]->[new_x,new_y] transformed_bboxes = tf.reshape( tf.einsum("ij,kj->ki", matrix, new_bboxes), (-1, 8) ) return transformed_bboxes @staticmethod def _apply_vertical_transformation_to_bounding_box( extended_bounding_boxes, y ): # create transformation matrix [1,4] matrix = tf.stack([1.0, 0, -y, 1.0], axis=0) # reshape it to [2,2] matrix = tf.reshape(matrix, (2, 2)) # reshape unnormalized bboxes from [N,8] -> [N*4,2] new_bboxes = tf.reshape(extended_bounding_boxes, (-1, 2)) # [[1,x`],[y`,1]]*[x,y]->[new_x,new_y] transformed_bboxes = tf.reshape( tf.einsum("ij,kj->ki", matrix, new_bboxes), 
(-1, 8) ) return transformed_bboxes @staticmethod def _convert_to_extended_corners_format(boxes): """splits corner boxes top left,bottom right to 4 corners top left, bottom right,top right and bottom left""" x1, y1, x2, y2 = tf.split(boxes, [1, 1, 1, 1], axis=-1) new_boxes = tf.concat( [x1, y1, x2, y2, x2, y1, x1, y2], axis=-1, ) return new_boxes # End copy class RandomShearTest(tf.test.TestCase): def test_consistency_with_old_implementation(self): # Prepare inputs batch_size = 2 images = tf.random.uniform(shape=(batch_size, 64, 64, 3)) shear_x = tf.random.uniform(shape=()) shear_y = tf.random.uniform(shape=()) bounding_boxes = { "boxes": tf.constant( [ [[10.0, 20.0, 40.0, 50.0], [12.0, 22.0, 42.0, 54.0]], [[15.0, 16.0, 17, 18], [12.0, 22.0, 42.0, 54.0]], ], dtype=tf.float32, ), "classes": tf.constant([[0, 0], [0, 0]], dtype=tf.float32), } # Build layers old_layer = OldRandomShear( x_factor=(shear_x, shear_x), y_factor=(shear_y, shear_y), seed=1234, bounding_box_format="xyxy", ) new_layer = RandomShear( x_factor=(shear_x, shear_x), y_factor=(shear_y, shear_y), seed=1234, bounding_box_format="xyxy", ) # Disable random negation to get deterministic factor old_layer.get_random_transformation = MagicMock( return_value=( old_layer.x_factor(), old_layer.y_factor(), ) ) new_layer.get_random_transformation_batch = MagicMock( return_value={ "shear_x": new_layer.x_factor((batch_size, 1)), "shear_y": new_layer.y_factor((batch_size, 1)), } ) # Run inference + compare outputs: old_output = old_layer( {"images": images, "bounding_boxes": bounding_boxes} ) output = new_layer({"images": images, "bounding_boxes": bounding_boxes}) self.assertAllClose(output["images"], old_output["images"]) self.assertAllClose( output["bounding_boxes"]["boxes"].to_tensor(), old_output["bounding_boxes"]["boxes"].to_tensor(), ) self.assertAllClose( output["bounding_boxes"]["classes"], old_output["bounding_boxes"]["classes"], ) if __name__ == "__main__": # Run benchmark (x_train, _), _ = tf.keras.datasets.cifar10.load_data() x_train = x_train.astype(np.float32) num_images = [1000, 2000, 5000, 10000] results = {} aug_candidates = [RandomShear, OldRandomShear] aug_args = {"x_factor": (5, 5), "y_factor": (5, 5)} for aug in aug_candidates: # Eager Mode c = aug.__name__ layer = aug(**aug_args) runtimes = [] print(f"Timing {c}") for n_images in num_images: # warmup layer(x_train[:n_images]) t0 = time.time() r1 = layer(x_train[:n_images]) t1 = time.time() runtimes.append(t1 - t0) print(f"Runtime for {c}, n_images={n_images}: {t1 - t0}") results[c] = runtimes # Graph Mode c = aug.__name__ + " Graph Mode" layer = aug(**aug_args) @tf.function() def apply_aug(inputs): return layer(inputs) runtimes = [] print(f"Timing {c}") for n_images in num_images: # warmup apply_aug(x_train[:n_images]) t0 = time.time() r1 = apply_aug(x_train[:n_images]) t1 = time.time() runtimes.append(t1 - t0) print(f"Runtime for {c}, n_images={n_images}: {t1 - t0}") results[c] = runtimes # Not running with XLA as it does not support ImageProjectiveTransformV3 plt.figure() for key in results: plt.plot(num_images, results[key], label=key) plt.xlabel("Number images") plt.ylabel("Runtime (seconds)") plt.legend() plt.savefig("comparison.png") # So we can actually see more relevant margins del results[aug_candidates[1].__name__] plt.figure() for key in results: plt.plot(num_images, results[key], label=key) plt.xlabel("Number images") plt.ylabel("Runtime (seconds)") plt.legend() plt.savefig("comparison_no_old_eager.png") # Run unit tests tf.test.main()
keras-cv/benchmarks/vectorized_random_shear.py/0
{ "file_path": "keras-cv/benchmarks/vectorized_random_shear.py", "repo_id": "keras-cv", "token_count": 7923 }
1
# ImageNet Classification Training TODO(ianjjohnson): Write a README describing this directory.
keras-cv/examples/training/classification/imagenet/README.md/0
{ "file_path": "keras-cv/examples/training/classification/imagenet/README.md", "repo_id": "keras-cv", "token_count": 25 }
2
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def detect_if_tensorflow_uses_keras_3(): # We follow the version of keras that tensorflow is configured to use. try: from tensorflow import keras # Note that only recent versions of keras have a `version()` function. if hasattr(keras, "version") and keras.version().startswith("3."): return True except: raise ValueError( "Unable to import `keras` with `tensorflow`. Please check your " "Keras and Tensorflow version are compatible; Keras 3 requires " "TensorFlow 2.15 or later. See keras.io/getting_started for more " "information on installing Keras." ) # No `keras.version()` means we are on an old version of keras. return False _USE_KERAS_3 = detect_if_tensorflow_uses_keras_3() def keras_3(): """Check if Keras 3 is being used.""" return _USE_KERAS_3 def backend(): """Check the backend framework.""" if not keras_3(): return "tensorflow" import keras return keras.config.backend()
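A small illustrative sketch of how these helpers are meant to be consumed by downstream code (not part of this module):

```python
from keras_cv.backend import config

if config.keras_3():
    # Multi-backend Keras 3: backend() may report "tensorflow", "jax" or "torch".
    print("Keras 3 active, backend:", config.backend())
else:
    # Legacy tf.keras (Keras 2): backend() always reports "tensorflow".
    print("Keras 2 (tf.keras) active, backend:", config.backend())
```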
keras-cv/keras_cv/backend/config.py/0
{ "file_path": "keras-cv/keras_cv/backend/config.py", "repo_id": "keras-cv", "token_count": 574 }
3
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest import tensorflow as tf from keras_cv import bounding_box from keras_cv.backend import ops from keras_cv.backend import random from keras_cv.tests.test_case import TestCase class MaskInvalidDetectionsTest(TestCase): def test_correctly_masks_based_on_max_dets(self): bounding_boxes = { "boxes": random.uniform((4, 100, 4)), "num_detections": ops.array([2, 3, 4, 2]), "classes": random.uniform((4, 100)), } result = bounding_box.mask_invalid_detections(bounding_boxes) negative_one_boxes = result["boxes"][:, 5:, :] self.assertAllClose( negative_one_boxes, -np.ones_like(ops.convert_to_numpy(negative_one_boxes)), ) preserved_boxes = result["boxes"][:, :2, :] self.assertAllClose(preserved_boxes, bounding_boxes["boxes"][:, :2, :]) boxes_from_image_3 = result["boxes"][2, :4, :] self.assertAllClose( boxes_from_image_3, bounding_boxes["boxes"][2, :4, :] ) @pytest.mark.tf_keras_only def test_ragged_outputs(self): bounding_boxes = { "boxes": np.stack( [ np.random.uniform(size=(10, 4)), np.random.uniform(size=(10, 4)), ] ), "num_detections": np.array([2, 3]), "classes": np.stack( [np.random.uniform(size=(10,)), np.random.uniform(size=(10,))] ), } result = bounding_box.mask_invalid_detections( bounding_boxes, output_ragged=True ) self.assertTrue(isinstance(result["boxes"], tf.RaggedTensor)) self.assertEqual(result["boxes"][0].shape[0], 2) self.assertEqual(result["boxes"][1].shape[0], 3) @pytest.mark.tf_keras_only def test_correctly_masks_confidence(self): bounding_boxes = { "boxes": np.stack( [ np.random.uniform(size=(10, 4)), np.random.uniform(size=(10, 4)), ] ), "confidence": np.random.uniform(size=(2, 10)), "num_detections": np.array([2, 3]), "classes": np.stack( [np.random.uniform(size=(10,)), np.random.uniform(size=(10,))] ), } result = bounding_box.mask_invalid_detections( bounding_boxes, output_ragged=True ) self.assertTrue(isinstance(result["boxes"], tf.RaggedTensor)) self.assertEqual(result["boxes"][0].shape[0], 2) self.assertEqual(result["boxes"][1].shape[0], 3) self.assertEqual(result["confidence"][0].shape[0], 2) self.assertEqual(result["confidence"][1].shape[0], 3)
keras-cv/keras_cv/bounding_box/mask_invalid_detections_test.py/0
{ "file_path": "keras-cv/keras_cv/bounding_box/mask_invalid_detections_test.py", "repo_id": "keras-cv", "token_count": 1562 }
4
/* Copyright 2022 The KerasCV Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" using namespace tensorflow; REGISTER_OP("KcvPairwiseIou3D") .Input("boxes_a: float") .Input("boxes_b: float") .Output("iou: float") .SetShapeFn([](tensorflow::shape_inference::InferenceContext* c) { c->set_output( 0, c->MakeShape({c->Dim(c->input(0), 0), c->Dim(c->input(1), 0)})); return tensorflow::Status(); }) .Doc(R"doc( Calculate pairwise IoUs between two set of 3D bboxes. Every bbox is represented as [center_x, center_y, center_z, dim_x, dim_y, dim_z, heading]. boxes_a: A tensor of shape [num_boxes_a, 7] boxes_b: A tensor of shape [num_boxes_b, 7] )doc");
keras-cv/keras_cv/custom_ops/ops/pairwise_iou_op.cc/0
{ "file_path": "keras-cv/keras_cv/custom_ops/ops/pairwise_iou_op.cc", "repo_id": "keras-cv", "token_count": 454 }
5
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from keras_cv.tests.test_case import TestCase try: from keras_cv.datasets.waymo import load except ImportError: # Waymo Open Dataset dependency may be missing, in which case we expect # these tests will be skipped based on the TEST_WAYMO_DEPS environment var. pass class WaymoOpenDatasetLoadTest(TestCase): def setUp(self): super().setUp() self.test_data_path = os.path.abspath( os.path.join(os.path.abspath(__file__), os.path.pardir, "test_data") ) self.test_data_file = "wod_one_frame.tfrecord" @pytest.mark.skipif( "TEST_WAYMO_DEPS" not in os.environ or os.environ["TEST_WAYMO_DEPS"] != "true", reason="Requires Waymo Open Dataset package", ) def test_load_from_directory(self): dataset = load(self.test_data_path) # Extract records into a list dataset = [record for record in dataset] self.assertEquals(len(dataset), 1) self.assertNotEqual(dataset[0]["timestamp_micros"], 0) @pytest.mark.skipif( "TEST_WAYMO_DEPS" not in os.environ or os.environ["TEST_WAYMO_DEPS"] != "true", reason="Requires Waymo Open Dataset package", ) def test_load_from_files(self): dataset = load([os.path.join(self.test_data_path, self.test_data_file)]) # Extract records into a list dataset = [record for record in dataset] self.assertEquals(len(dataset), 1) self.assertNotEqual(dataset[0]["timestamp_micros"], 0)
keras-cv/keras_cv/datasets/waymo/load_test.py/0
{ "file_path": "keras-cv/keras_cv/datasets/waymo/load_test.py", "repo_id": "keras-cv", "token_count": 818 }
6
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras BN_AXIS = 3 CONV_KERNEL_INITIALIZER = { "class_name": "VarianceScaling", "config": { "scale": 2.0, "mode": "fan_out", "distribution": "truncated_normal", }, } @keras_cv_export("keras_cv.layers.FusedMBConvBlock") class FusedMBConvBlock(keras.layers.Layer): """ Implementation of the FusedMBConv block (Fused Mobile Inverted Residual Bottleneck) from: [EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML](https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html) [EfficientNetV2: Smaller Models and Faster Training](https://arxiv.org/abs/2104.00298v3). FusedMBConv blocks are based on MBConv blocks, and replace the depthwise and 1x1 output convolution blocks with a single 3x3 convolution block, fusing them together - hence the name "FusedMBConv". Alongside MBConv blocks, they can be used in mobile-oriented and efficient architectures, and are present in architectures EfficientNet. FusedMBConv blocks follow a narrow-wide-narrow structure - expanding a 1x1 convolution, performing Squeeze-Excitation and then applying a 3x3 convolution, which is a more efficient operation than conventional wide-narrow-wide structures. As they're frequently used for models to be deployed to edge devices, they're implemented as a layer for ease of use and re-use. 
Args: input_filters: int, the number of input filters output_filters: int, the number of output filters expand_ratio: default 1, the ratio by which input_filters are multiplied to expand the structure in the middle expansion phase kernel_size: default 3, the kernel_size to apply to the expansion phase convolutions strides: default 1, the strides to apply to the expansion phase convolutions se_ratio: default 0.0, The filters used in the Squeeze-Excitation phase, and are chosen as the maximum between 1 and input_filters*se_ratio bn_momentum: default 0.9, the BatchNormalization momentum activation: default "swish", the activation function used between convolution operations survival_probability: float, the optional dropout rate to apply before the output convolution, defaults to 0.8 Returns: A `tf.Tensor` representing a feature map, passed through the FusedMBConv block Example usage: ``` inputs = tf.random.normal(shape=(1, 64, 64, 32), dtype=tf.float32) layer = keras_cv.layers.FusedMBConvBlock( input_filters=32, output_filters=32 ) output = layer(inputs) output.shape # TensorShape([1, 224, 224, 48]) ``` """ # noqa: E501 def __init__( self, input_filters: int, output_filters: int, expand_ratio=1, kernel_size=3, strides=1, se_ratio=0.0, bn_momentum=0.9, activation="swish", survival_probability: float = 0.8, **kwargs ): super().__init__(**kwargs) self.input_filters = input_filters self.output_filters = output_filters self.expand_ratio = expand_ratio self.kernel_size = kernel_size self.strides = strides self.se_ratio = se_ratio self.bn_momentum = bn_momentum self.activation = activation self.survival_probability = survival_probability self.filters = self.input_filters * self.expand_ratio self.filters_se = max(1, int(input_filters * se_ratio)) self.conv1 = keras.layers.Conv2D( filters=self.filters, kernel_size=kernel_size, strides=strides, kernel_initializer=CONV_KERNEL_INITIALIZER, padding="same", data_format="channels_last", use_bias=False, name=self.name + "expand_conv", ) self.bn1 = keras.layers.BatchNormalization( axis=BN_AXIS, momentum=self.bn_momentum, name=self.name + "expand_bn", ) self.act = keras.layers.Activation( self.activation, name=self.name + "expand_activation" ) self.bn2 = keras.layers.BatchNormalization( axis=BN_AXIS, momentum=self.bn_momentum, name=self.name + "bn" ) self.se_conv1 = keras.layers.Conv2D( self.filters_se, 1, padding="same", activation=self.activation, kernel_initializer=CONV_KERNEL_INITIALIZER, name=self.name + "se_reduce", ) self.se_conv2 = keras.layers.Conv2D( self.filters, 1, padding="same", activation="sigmoid", kernel_initializer=CONV_KERNEL_INITIALIZER, name=self.name + "se_expand", ) self.output_conv = keras.layers.Conv2D( filters=self.output_filters, kernel_size=1 if expand_ratio != 1 else kernel_size, strides=1, kernel_initializer=CONV_KERNEL_INITIALIZER, padding="same", data_format="channels_last", use_bias=False, name=self.name + "project_conv", ) self.bn3 = keras.layers.BatchNormalization( axis=BN_AXIS, momentum=self.bn_momentum, name=self.name + "project_bn", ) if self.survival_probability: self.dropout = keras.layers.Dropout( self.survival_probability, noise_shape=(None, 1, 1, 1), name=self.name + "drop", ) def build(self, input_shape): if self.name is None: self.name = keras.backend.get_uid("block0") def call(self, inputs): # Expansion phase if self.expand_ratio != 1: x = self.conv1(inputs) x = self.bn1(x) x = self.act(x) else: x = inputs # Squeeze and excite if 0 < self.se_ratio <= 1: se = keras.layers.GlobalAveragePooling2D( name=self.name + 
"se_squeeze" )(x) if BN_AXIS == 1: se_shape = (self.filters, 1, 1) else: se_shape = (1, 1, self.filters) se = keras.layers.Reshape(se_shape, name=self.name + "se_reshape")( se ) se = self.se_conv1(se) se = self.se_conv2(se) x = keras.layers.multiply([x, se], name=self.name + "se_excite") # Output phase: x = self.output_conv(x) x = self.bn3(x) if self.expand_ratio == 1: x = self.act(x) # Residual: if self.strides == 1 and self.input_filters == self.output_filters: if self.survival_probability: x = self.dropout(x) x = keras.layers.Add(name=self.name + "add")([x, inputs]) return x def get_config(self): config = { "input_filters": self.input_filters, "output_filters": self.output_filters, "expand_ratio": self.expand_ratio, "kernel_size": self.kernel_size, "strides": self.strides, "se_ratio": self.se_ratio, "bn_momentum": self.bn_momentum, "activation": self.activation, "survival_probability": self.survival_probability, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
keras-cv/keras_cv/layers/fusedmbconv.py/0
{ "file_path": "keras-cv/keras_cv/layers/fusedmbconv.py", "repo_id": "keras-cv", "token_count": 3862 }
7
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import tensorflow as tf from keras_cv.layers.object_detection.roi_generator import ROIGenerator from keras_cv.tests.test_case import TestCase @pytest.mark.tf_keras_only class ROIGeneratorTest(TestCase): def test_single_tensor(self): roi_generator = ROIGenerator("xyxy", nms_iou_threshold_train=0.96) rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], ] ) expected_rois = tf.gather(rpn_boxes, [[1, 3, 2]], batch_dims=1) expected_rois = tf.concat([expected_rois, tf.zeros([1, 1, 4])], axis=1) rpn_scores = tf.constant( [ [0.6, 0.9, 0.2, 0.3], ] ) # selecting the 1st, then 3rd, then 2nd as they don't overlap # 0th box overlaps with 1st box expected_roi_scores = tf.gather(rpn_scores, [[1, 3, 2]], batch_dims=1) expected_roi_scores = tf.concat( [expected_roi_scores, tf.zeros([1, 1])], axis=1 ) rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores) def test_single_level_single_batch_roi_ignore_box(self): roi_generator = ROIGenerator("xyxy", nms_iou_threshold_train=0.96) rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], ] ) expected_rois = tf.gather(rpn_boxes, [[1, 3, 2]], batch_dims=1) expected_rois = tf.concat([expected_rois, tf.zeros([1, 1, 4])], axis=1) rpn_boxes = {2: rpn_boxes} rpn_scores = tf.constant( [ [0.6, 0.9, 0.2, 0.3], ] ) # selecting the 1st, then 3rd, then 2nd as they don't overlap # 0th box overlaps with 1st box expected_roi_scores = tf.gather(rpn_scores, [[1, 3, 2]], batch_dims=1) expected_roi_scores = tf.concat( [expected_roi_scores, tf.zeros([1, 1])], axis=1 ) rpn_scores = {2: rpn_scores} rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores) def test_single_level_single_batch_roi_all_box(self): # for iou between 1st and 2nd box is 0.9604, so setting to 0.97 to # such that NMS would treat them as different ROIs roi_generator = ROIGenerator("xyxy", nms_iou_threshold_train=0.97) rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], ] ) expected_rois = tf.gather(rpn_boxes, [[1, 0, 3, 2]], batch_dims=1) rpn_boxes = {2: rpn_boxes} rpn_scores = tf.constant( [ [0.6, 0.9, 0.2, 0.3], ] ) # selecting the 1st, then 0th, then 3rd, then 2nd as they don't overlap expected_roi_scores = tf.gather( rpn_scores, [[1, 0, 3, 2]], batch_dims=1 ) rpn_scores = {2: rpn_scores} rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores) def test_single_level_propose_rois(self): roi_generator = ROIGenerator("xyxy") rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], [ [2, 2, 4, 4], [3, 3, 6, 6], [3.1, 3.1, 6.1, 6.1], 
[1, 1, 8, 8], ], ] ) expected_rois = tf.gather( rpn_boxes, [[1, 3, 2], [1, 3, 0]], batch_dims=1 ) expected_rois = tf.concat([expected_rois, tf.zeros([2, 1, 4])], axis=1) rpn_boxes = {2: rpn_boxes} rpn_scores = tf.constant([[0.6, 0.9, 0.2, 0.3], [0.1, 0.8, 0.3, 0.5]]) # 1st batch -- selecting the 1st, then 3rd, then 2nd as they don't # overlap # 2nd batch -- selecting the 1st, then 3rd, then 0th as they don't # overlap expected_roi_scores = tf.gather( rpn_scores, [[1, 3, 2], [1, 3, 0]], batch_dims=1 ) expected_roi_scores = tf.concat( [expected_roi_scores, tf.zeros([2, 1])], axis=1 ) rpn_scores = {2: rpn_scores} rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores) def test_two_level_single_batch_propose_rois_ignore_box(self): roi_generator = ROIGenerator("xyxy") rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], [ [2, 2, 4, 4], [3, 3, 6, 6], [3.1, 3.1, 6.1, 6.1], [1, 1, 8, 8], ], ] ) expected_rois = tf.constant( [ [ [0.1, 0.1, 9.9, 9.9], [3, 3, 6, 6], [1, 1, 8, 8], [2, 2, 8, 8], [5, 5, 10, 10], [2, 2, 4, 4], [0, 0, 0, 0], [0, 0, 0, 0], ] ] ) rpn_boxes = {2: rpn_boxes[0:1], 3: rpn_boxes[1:2]} rpn_scores = tf.constant([[0.6, 0.9, 0.2, 0.3], [0.1, 0.8, 0.3, 0.5]]) # 1st batch -- selecting the 1st, then 3rd, then 2nd as they don't # overlap # 2nd batch -- selecting the 1st, then 3rd, then 0th as they don't # overlap expected_roi_scores = [ [ 0.9, 0.8, 0.5, 0.3, 0.2, 0.1, 0.0, 0.0, ] ] rpn_scores = {2: rpn_scores[0:1], 3: rpn_scores[1:2]} rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores) def test_two_level_single_batch_propose_rois_all_box(self): roi_generator = ROIGenerator("xyxy", nms_iou_threshold_train=0.99) rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], [ [2, 2, 4, 4], [3, 3, 6, 6], [3.1, 3.1, 6.1, 6.1], [1, 1, 8, 8], ], ] ) expected_rois = tf.constant( [ [ [0.1, 0.1, 9.9, 9.9], [3, 3, 6, 6], [0, 0, 10, 10], [1, 1, 8, 8], [2, 2, 8, 8], [3.1, 3.1, 6.1, 6.1], [5, 5, 10, 10], [2, 2, 4, 4], ] ] ) rpn_boxes = {2: rpn_boxes[0:1], 3: rpn_boxes[1:2]} rpn_scores = tf.constant([[0.6, 0.9, 0.2, 0.3], [0.1, 0.8, 0.3, 0.5]]) # 1st batch -- selecting the 1st, then 0th, then 3rd, then 2nd as they # don't overlap # 2nd batch -- selecting the 1st, then 3rd, then 2nd, then 0th as they # don't overlap expected_roi_scores = [ [ 0.9, 0.8, 0.6, 0.5, 0.3, 0.3, 0.2, 0.1, ] ] rpn_scores = {2: rpn_scores[0:1], 3: rpn_scores[1:2]} rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores)
keras-cv/keras_cv/layers/object_detection/roi_generator_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/object_detection/roi_generator_test.py", "repo_id": "keras-cv", "token_count": 5746 }
8
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.backend import keras from keras_cv.layers.object_detection_3d.voxelization import DynamicVoxelization from keras_cv.tests.test_case import TestCase class VoxelizationTest(TestCase): def test_voxelization_output_shape_no_z(self): layer = DynamicVoxelization( voxel_size=[0.1, 0.1, 1000], spatial_size=[-20, 20, -20, 20, -20, 20], ) point_xyz = tf.random.uniform( shape=[1, 1000, 3], minval=-5, maxval=5, dtype=tf.float32 ) point_feature = tf.random.uniform( shape=[1, 1000, 4], minval=-10, maxval=10, dtype=tf.float32 ) point_mask = tf.cast( tf.random.uniform( shape=[1, 1000], minval=0, maxval=2, dtype=tf.int32 ), tf.bool, ) output = layer(point_xyz, point_feature, point_mask) # (20 - (-20)) / 0.1 = 400, (20 - (-20) ) / 1000 = 0.4 # the last dimension is replaced with MLP dimension, z dimension is # skipped self.assertEqual(output.shape, (1, 400, 400, 128)) def test_voxelization_output_shape_with_z(self): layer = DynamicVoxelization( voxel_size=[0.1, 0.1, 1], spatial_size=[-20, 20, -20, 20, -15, 15], ) point_xyz = tf.random.uniform( shape=[1, 1000, 3], minval=-5, maxval=5, dtype=tf.float32 ) point_feature = tf.random.uniform( shape=[1, 1000, 4], minval=-10, maxval=10, dtype=tf.float32 ) point_mask = tf.cast( tf.random.uniform( shape=[1, 1000], minval=0, maxval=2, dtype=tf.int32 ), tf.bool, ) output = layer(point_xyz, point_feature, point_mask) # (20 - (-20)) / 0.1 = 400, (20 - (-20) ) / 1000 = 0.4 # (15 - (-15)) / 1 = 30 # the last dimension is replaced with MLP dimension, z dimension is # skipped self.assertEqual(output.shape, (1, 400, 400, 30, 128)) def test_voxelization_numerical(self): layer = DynamicVoxelization( voxel_size=[1.0, 1.0, 10.0], spatial_size=[-5, 5, -5, 5, -2, 2], ) # Make the point net a no-op to allow us to verify the voxelization. layer.point_net_dense = keras.layers.Identity() # TODO(ianstenbit): use Identity here once it supports masking layer.point_net_norm = keras.layers.Lambda(lambda x: x) layer.point_net_activation = keras.layers.Identity() point_xyz = tf.constant( [ [ [-4.9, -4.9, 0.0], [4.4, 4.4, 0.0], ] ] ) point_feature = tf.constant( [ [ [1.0], [2.0], ] ] ) point_mask = tf.constant([True], shape=[1, 2]) output = layer(point_xyz, point_feature, point_mask) # [-4.9, -4.9, 0] will the mapped to the upper leftmost voxel, # the first element is point feature, # the second / third element is -4.9 - (-5) = 0.1 self.assertAllClose(output[0][0][0], [1.0, 0.1, 0.1, 0]) # [4.4, 4.4, 0] will the mapped to the lower rightmost voxel, # the first element is point feature # the second / third element is 4.4 - 4 = 0.4, because the # voxel range is [-5, 4] for 10 voxels. self.assertAllClose(output[0][-1][-1], [2.0, 0.4, 0.4, 0])
keras-cv/keras_cv/layers/object_detection_3d/voxelization_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/object_detection_3d/voxelization_test.py", "repo_id": "keras-cv", "token_count": 2012 }
9
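The shape assertions in the voxelization tests above follow directly from `spatial_size` and `voxel_size`. A small, hypothetical helper (not part of KerasCV) makes the arithmetic explicit, including why the z dimension collapses in the first test but survives in the second:

```python
def voxel_grid_dims(spatial_size, voxel_size):
    # spatial_size is [x_min, x_max, y_min, y_max, z_min, z_max]
    dims = []
    for axis in range(3):
        extent = spatial_size[2 * axis + 1] - spatial_size[2 * axis]
        dims.append(int(round(extent / voxel_size[axis])))
    return dims

# First test: z extent 40 / voxel size 1000 = 0.04 -> 0 voxels, so z is dropped.
print(voxel_grid_dims([-20, 20, -20, 20, -20, 20], [0.1, 0.1, 1000]))  # [400, 400, 0]
# Second test: z extent 30 / voxel size 1 = 30 voxels, so z is kept.
print(voxel_grid_dims([-20, 20, -20, 20, -15, 15], [0.1, 0.1, 1]))  # [400, 400, 30]
```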
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.api_export import keras_cv_export from keras_cv.layers.preprocessing.base_image_augmentation_layer import ( BaseImageAugmentationLayer, ) @keras_cv_export("keras_cv.layers.FourierMix") class FourierMix(BaseImageAugmentationLayer): """FourierMix implements the FMix data augmentation technique. Args: alpha: Float value for beta distribution. Inverse scale parameter for the gamma distribution. This controls the shape of the distribution from which the smoothing values are sampled. Defaults to 0.5, which is a recommended value in the paper. decay_power: A float value representing the decay power, defaults to 3, as recommended in the paper. seed: Integer. Used to create a random seed. References: - [FMix paper](https://arxiv.org/abs/2002.12047). Sample usage: ```python (images, labels), _ = keras.datasets.cifar10.load_data() fourier_mix = keras_cv.layers.preprocessing.FourierMix(0.5) augmented_images, updated_labels = fourier_mix( {'images': images, 'labels': labels} ) # output == {'images': updated_images, 'labels': updated_labels} ``` """ def __init__(self, alpha=0.5, decay_power=3, seed=None, **kwargs): super().__init__(seed=seed, **kwargs) self.alpha = alpha self.decay_power = decay_power self.seed = seed def _sample_from_beta(self, alpha, beta, shape): sample_alpha = tf.random.gamma( shape, alpha=alpha, ) sample_beta = tf.random.gamma( shape, alpha=beta, ) return sample_alpha / (sample_alpha + sample_beta) @staticmethod def _fftfreq(signal_size, sample_spacing=1): """This function returns the sample frequencies of a discrete fourier transform. The result array contains the frequency bin centers starting at 0 using the sample spacing. """ results = tf.concat( [ tf.range((signal_size - 1) / 2 + 1, dtype=tf.int32), tf.range(-(signal_size // 2), 0, dtype=tf.int32), ], 0, ) return results / (signal_size * sample_spacing) def _apply_fftfreq(self, h, w): # Applying the fourier transform across 2 dimensions (height and width). fx = FourierMix._fftfreq(w)[: w // 2 + 1 + w % 2] fy = FourierMix._fftfreq(h) fy = tf.expand_dims(fy, -1) return tf.math.sqrt(fx * fx + fy * fy) def _get_spectrum(self, freqs, decay_power, channel, h, w): # Function to apply a low pass filter by decaying its high frequency # components. scale = tf.ones(1) / tf.cast( tf.math.maximum( freqs, tf.convert_to_tensor([1 / tf.reduce_max([w, h])]) ) ** decay_power, tf.float32, ) param_size = tf.concat( [tf.constant([channel]), tf.shape(freqs), tf.constant([2])], 0 ) param = self._random_generator.normal(param_size) scale = tf.expand_dims(scale, -1)[None, :] return scale * param def _sample_mask_from_transform(self, decay, shape, ch=1): # Sampling low frequency map from fourier transform. 
freqs = self._apply_fftfreq(shape[0], shape[1]) spectrum = self._get_spectrum(freqs, decay, ch, shape[0], shape[1]) spectrum = tf.complex(spectrum[:, 0], spectrum[:, 1]) mask = tf.math.real(tf.signal.irfft2d(spectrum, shape)) mask = mask[:1, : shape[0], : shape[1]] mask = mask - tf.reduce_min(mask) mask = mask / tf.reduce_max(mask) return mask def _binarise_mask(self, mask, lam, in_shape): # Create the final mask from the sampled values. idx = tf.argsort(tf.reshape(mask, [-1]), direction="DESCENDING") mask = tf.reshape(mask, [-1]) num = tf.cast( tf.math.round(lam * tf.cast(tf.size(mask), tf.float32)), tf.int32 ) updates = tf.concat( [ tf.ones((num,), tf.float32), tf.zeros((tf.size(mask) - num,), tf.float32), ], 0, ) mask = tf.scatter_nd( tf.expand_dims(idx, -1), updates, tf.expand_dims(tf.size(mask), -1) ) mask = tf.reshape(mask, in_shape) return mask def _batch_augment(self, inputs): images = inputs.get("images", None) labels = inputs.get("labels", None) segmentation_masks = inputs.get("segmentation_masks", None) if images is None or (labels is None and segmentation_masks is None): raise ValueError( "FourierMix expects inputs in a dictionary with format " '{"images": images, "labels": labels}.' '{"images": images, "segmentation_masks": segmentation_masks}.' f"Got: inputs = {inputs}" ) images, masks, lambda_sample, permutation_order = self._fourier_mix( images ) if labels is not None: labels = self._update_labels( labels, lambda_sample, permutation_order ) inputs["labels"] = labels if segmentation_masks is not None: segmentation_masks = self._update_segmentation_masks( segmentation_masks, masks, permutation_order ) inputs["segmentation_masks"] = segmentation_masks inputs["images"] = images return inputs def _augment(self, inputs): raise ValueError( "FourierMix received a single image to `call`. The layer relies on " "combining multiple examples, and as such will not behave as " "expected. Please call the layer with 2 or more samples." 
) def _fourier_mix(self, images): shape = tf.shape(images) permutation_order = tf.random.shuffle( tf.range(0, shape[0]), seed=self.seed ) lambda_sample = self._sample_from_beta( self.alpha, self.alpha, (shape[0],) ) # generate masks utilizing mapped calls masks = tf.map_fn( lambda x: self._sample_mask_from_transform( self.decay_power, shape[1:-1] ), tf.range(shape[0], dtype=tf.float32), ) # binarise masks utilizing mapped calls masks = tf.map_fn( lambda i: self._binarise_mask( masks[i], lambda_sample[i], shape[1:-1] ), tf.range(shape[0], dtype=tf.int32), fn_output_signature=tf.float32, ) masks = tf.expand_dims(masks, -1) fmix_images = tf.gather(images, permutation_order) images = masks * images + (1.0 - masks) * fmix_images return images, masks, lambda_sample, permutation_order def _update_labels(self, labels, lambda_sample, permutation_order): labels_for_fmix = tf.gather(labels, permutation_order) # for broadcasting batch_size = tf.expand_dims(tf.shape(labels)[0], -1) labels_rank = tf.rank(labels) broadcast_shape = tf.concat( [batch_size, tf.ones(labels_rank - 1, tf.int32)], 0 ) lambda_sample = tf.reshape(lambda_sample, broadcast_shape) labels = ( lambda_sample * labels + (1.0 - lambda_sample) * labels_for_fmix ) return labels def _update_segmentation_masks( self, segmentation_masks, masks, permutation_order ): fmix_segmentation_masks = tf.gather( segmentation_masks, permutation_order ) segmentation_masks = ( masks * segmentation_masks + (1.0 - masks) * fmix_segmentation_masks ) return segmentation_masks def get_config(self): config = { "alpha": self.alpha, "decay_power": self.decay_power, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
keras-cv/keras_cv/layers/preprocessing/fourier_mix.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/fourier_mix.py", "repo_id": "keras-cv", "token_count": 3974 }
10
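`_sample_from_beta` above draws the FMix mixing weight from two gamma samples. A quick NumPy check (illustrative only, not KerasCV code) of the identity it relies on, namely that X / (X + Y) ~ Beta(alpha, beta) when X ~ Gamma(alpha) and Y ~ Gamma(beta):

```python
import numpy as np

rng = np.random.default_rng(0)
alpha = beta = 0.5  # the layer's default `alpha`, used for both parameters

x = rng.gamma(shape=alpha, size=100_000)
y = rng.gamma(shape=beta, size=100_000)
lam = x / (x + y)  # same construction as `_sample_from_beta`

# Beta(0.5, 0.5) has mean 0.5 and piles mass near 0 and 1, so most sampled
# mixing weights leave images either mostly original or mostly mixed.
print(lam.mean())  # approximately 0.5
print(((lam < 0.1) | (lam > 0.9)).mean())  # roughly 0.4
```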
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from absl.testing import parameterized from keras_cv import layers from keras_cv.backend import ops from keras_cv.tests.test_case import TestCase class RandAugmentTest(TestCase): def test_zero_rate_pass_through(self): rand_augment = layers.RandAugment( value_range=(0, 255), rate=0.0, ) xs = np.ones((2, 512, 512, 3)) ys = rand_augment(xs) self.assertAllClose(ys, xs) @parameterized.named_parameters( ("0", 0), ("20", 0.2), ("55", 0.55), ("10", 1.0), ) def test_runs_with_magnitude(self, magnitude): rand_augment = layers.RandAugment( value_range=(0, 255), rate=0.5, magnitude=magnitude ) xs = np.ones((2, 512, 512, 3)) ys = rand_augment(xs) self.assertEqual(ys.shape, (2, 512, 512, 3)) @parameterized.named_parameters( ("0_255", 0, 255), ("neg_1_1", -1, 1), ("0_1", 0, 1), ) def test_runs_with_value_range(self, low, high): rand_augment = layers.RandAugment( augmentations_per_image=3, magnitude=0.5, rate=1.0, value_range=(low, high), ) xs = tf.random.uniform((2, 512, 512, 3), low, high, dtype=tf.float32) ys = ops.convert_to_numpy(rand_augment(xs)) self.assertTrue(np.all(np.logical_and(ys >= low, ys <= high))) @parameterized.named_parameters( ("float32", "float32"), ("int32", "int32"), ("uint8", "uint8"), ) def test_runs_with_dtype_input(self, dtype): rand_augment = layers.RandAugment(value_range=(0, 255)) xs = np.ones((2, 512, 512, 3), dtype=dtype) ys = rand_augment(xs) self.assertEqual(ys.shape, (2, 512, 512, 3)) @parameterized.named_parameters( ("0_255", 0, 255), ("neg1_1", -1, 1), ("0_1", 0, 1), ) def test_standard_policy_respects_value_range(self, lower, upper): my_layers = layers.RandAugment.get_standard_policy( value_range=(lower, upper), magnitude=1.0, magnitude_stddev=0.2 ) rand_augment = layers.RandomAugmentationPipeline( layers=my_layers, augmentations_per_image=3 ) xs = tf.random.uniform((2, 512, 512, 3), lower, upper, dtype=tf.float32) ys = ops.convert_to_numpy(rand_augment(xs)) self.assertLessEqual(np.max(ys), upper) self.assertGreaterEqual(np.min(ys), lower) def test_runs_unbatched(self): rand_augment = layers.RandAugment( augmentations_per_image=3, magnitude=0.5, rate=1.0, value_range=(0, 255), ) xs = tf.random.uniform((512, 512, 3), 0, 255, dtype=tf.float32) ys = rand_augment(xs) self.assertEqual(xs.shape, ys.shape) def test_runs_no_geo(self): rand_augment = layers.RandAugment( augmentations_per_image=2, magnitude=0.5, rate=1.0, geometric=False, value_range=(0, 255), ) self.assertFalse( any( [ isinstance(x, layers.RandomTranslation) for x in rand_augment.layers ] ) ) self.assertFalse( any( [isinstance(x, layers.RandomShear) for x in rand_augment.layers] ) )
keras-cv/keras_cv/layers/preprocessing/rand_augment_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/rand_augment_test.py", "repo_id": "keras-cv", "token_count": 1973 }
11
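A hedged usage sketch of the layer exercised above inside a `tf.data` input pipeline (TensorFlow backend assumed; the dataset, image size, and argument values here are illustrative and mirror the ones the tests pass):

```python
import numpy as np
import tensorflow as tf

import keras_cv

rand_augment = keras_cv.layers.RandAugment(
    value_range=(0, 255), augmentations_per_image=3, magnitude=0.5, rate=1.0
)

images = np.random.uniform(0, 255, size=(8, 64, 64, 3)).astype("float32")
dataset = (
    tf.data.Dataset.from_tensor_slices(images)
    .batch(4)
    .map(lambda x: rand_augment(x), num_parallel_calls=tf.data.AUTOTUNE)
)

for batch in dataset.take(1):
    print(batch.shape)  # (4, 64, 64, 3)
```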
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest import tensorflow as tf from keras_cv.layers import preprocessing from keras_cv.tests.test_case import TestCase class RandomColorJitterTest(TestCase): # Test 1: Check input and output shape. It should match. def test_return_shapes(self): batch_input = np.ones((2, 512, 512, 3)) non_square_batch_input = np.ones((2, 1024, 512, 3)) unbatch_input = np.ones((512, 512, 3)) layer = preprocessing.RandomColorJitter( value_range=(0, 255), brightness_factor=0.5, contrast_factor=(0.5, 0.9), saturation_factor=(0.5, 0.9), hue_factor=0.5, ) batch_output = layer(batch_input, training=True) non_square_batch_output = layer(non_square_batch_input, training=True) unbatch_output = layer(unbatch_input, training=True) self.assertEqual(batch_output.shape, (2, 512, 512, 3)) self.assertEqual(non_square_batch_output.shape, (2, 1024, 512, 3)) self.assertEqual(unbatch_output.shape, (512, 512, 3)) # Test 2: Check if the factor ranges are set properly. def test_factor_range(self): layer = preprocessing.RandomColorJitter( value_range=(0, 255), brightness_factor=(-0.2, 0.5), contrast_factor=(0.5, 0.9), saturation_factor=(0.5, 0.9), hue_factor=(0.5, 0.9), ) self.assertEqual(layer.brightness_factor, (-0.2, 0.5)) self.assertEqual(layer.contrast_factor, (0.5, 0.9)) self.assertEqual(layer.saturation_factor, (0.5, 0.9)) self.assertEqual(layer.hue_factor, (0.5, 0.9)) # Test 3: Test if it is OK to run on graph mode. @pytest.mark.tf_only def test_in_tf_function(self): inputs = np.ones((2, 512, 512, 3)) layer = preprocessing.RandomColorJitter( value_range=(0, 255), brightness_factor=0.5, contrast_factor=(0.5, 0.9), saturation_factor=(0.5, 0.9), hue_factor=0.5, ) @tf.function def augment(x): return layer(x, training=True) outputs = augment(inputs) self.assertNotAllClose(inputs, outputs) # Test 4: Check if get_config and from_config work as expected. def test_config(self): layer = preprocessing.RandomColorJitter( value_range=(0, 255), brightness_factor=0.5, contrast_factor=(0.5, 0.9), saturation_factor=(0.5, 0.9), hue_factor=0.5, ) config = layer.get_config() self.assertEqual(config["brightness_factor"], 0.5) self.assertEqual(config["contrast_factor"], (0.5, 0.9)) self.assertEqual(config["saturation_factor"], (0.5, 0.9)) self.assertEqual(config["hue_factor"], 0.5) reconstructed_layer = preprocessing.RandomColorJitter.from_config( config ) self.assertEqual( reconstructed_layer.brightness_factor, layer.brightness_factor ) self.assertEqual( reconstructed_layer.contrast_factor, layer.contrast_factor ) self.assertEqual( reconstructed_layer.saturation_factor, layer.saturation_factor ) self.assertEqual(reconstructed_layer.hue_factor, layer.hue_factor)
keras-cv/keras_cv/layers/preprocessing/random_color_jitter_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_color_jitter_test.py", "repo_id": "keras-cv", "token_count": 1717 }
12
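For completeness, a stand-alone usage sketch of the layer tested above (factor values copied from the tests; `training=True` is what enables the random jitter outside of `fit()`):

```python
import numpy as np

from keras_cv import layers as cv_layers

color_jitter = cv_layers.RandomColorJitter(
    value_range=(0, 255),
    brightness_factor=0.5,
    contrast_factor=(0.5, 0.9),
    saturation_factor=(0.5, 0.9),
    hue_factor=0.5,
)

images = np.random.uniform(0, 255, size=(2, 64, 64, 3)).astype("float32")
augmented = color_jitter(images, training=True)
print(augmented.shape)  # (2, 64, 64, 3)
```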
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from keras_cv.backend import ops from keras_cv.layers.preprocessing.rescaling import Rescaling from keras_cv.tests.test_case import TestCase class RescalingTest(TestCase): def test_rescaling_correctness_float(self): layer = Rescaling(scale=1.0 / 127.5, offset=-1.0) inputs = tf.random.uniform((2, 4, 5, 3)) outputs = layer(inputs) self.assertAllClose(outputs, inputs * (1.0 / 127.5) - 1) def test_rescaling_correctness_int(self): layer = Rescaling(scale=1.0 / 127.5, offset=-1) inputs = tf.random.uniform((2, 4, 5, 3), 0, 100, dtype="int32") outputs = layer(inputs) outputs = ops.convert_to_numpy(outputs) self.assertEqual(outputs.dtype.name, "float32") self.assertAllClose( outputs, ops.convert_to_numpy(inputs) * (1.0 / 127.5) - 1 ) def test_config_with_custom_name(self): layer = Rescaling(0.5, name="rescaling") config = layer.get_config() layer_1 = Rescaling.from_config(config) self.assertEqual(layer_1.name, layer.name) def test_unbatched_image(self): layer = Rescaling(scale=1.0 / 127.5, offset=-1) inputs = tf.random.uniform((4, 5, 3)) outputs = layer(inputs) self.assertAllClose(outputs, inputs * (1.0 / 127.5) - 1) def test_output_dtypes(self): inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64") layer = Rescaling(0.5) self.assertAllEqual( ops.convert_to_numpy(layer(inputs)).dtype.name, "float32" ) layer = Rescaling(0.5, dtype="uint8") self.assertAllEqual( ops.convert_to_numpy(layer(inputs)).dtype.name, "uint8" )
keras-cv/keras_cv/layers/preprocessing/rescaling_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/rescaling_test.py", "repo_id": "keras-cv", "token_count": 973 }
13
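The expected outputs in the tests above come from the affine map `output = input * scale + offset`. A tiny numeric check (plain NumPy, illustrative) of the commonly used `scale=1/127.5, offset=-1` setting:

```python
import numpy as np

scale, offset = 1.0 / 127.5, -1.0
pixels = np.array([0.0, 127.5, 255.0])

# Maps the uint8 range [0, 255] onto [-1, 1].
print(pixels * scale + offset)  # [-1.  0.  1.]
```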
# Copyright 2022 Waymo LLC. # # Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501 import tensorflow as tf from keras_cv import point_cloud from keras_cv.api_export import keras_cv_export from keras_cv.backend import random from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES POINTCLOUD_LABEL_INDEX = base_augmentation_layer_3d.POINTCLOUD_LABEL_INDEX @keras_cv_export("keras_cv.layers.FrustumRandomDroppingPoints") class FrustumRandomDroppingPoints( base_augmentation_layer_3d.BaseAugmentationLayer3D ): """A preprocessing layer which randomly drops point within a randomly generated frustum during training. This layer will randomly select a point from the point cloud as the center of a frustum then generate a frustum based on r_distance, theta_width, and phi_width. Points inside the selected frustum are randomly dropped (setting all features to zero) based on drop_rate. The point_clouds tensor shape must be specific and cannot be dynamic. Input shape: point_clouds: 3D (multi frames) float32 Tensor with shape [num of frames, num of points, num of point features]. The first 5 features are [x, y, z, class, range]. bounding_boxes: 3D (multi frames) float32 Tensor with shape [num of frames, num of boxes, num of box features]. The first 7 features are [x, y, z, dx, dy, dz, phi]. Output shape: A dictionary of Tensors with the same shape as input Tensors. Arguments: r_distance: A float scalar sets the starting distance of a frustum. theta_width: A float scalar sets the theta width of a frustum. phi_width: A float scalar sets the phi width of a frustum. drop_rate: A float scalar sets the probability threshold for dropping the points. exclude_classes: An optional int scalar or a list of ints. Points with the specified class(es) will not be dropped. """ def __init__( self, r_distance, theta_width, phi_width, drop_rate=None, exclude_classes=None, **kwargs, ): super().__init__(**kwargs) if not isinstance(exclude_classes, (tuple, list)): exclude_classes = [exclude_classes] if r_distance < 0: raise ValueError( f"r_distance must be >=0, but got r_distance={r_distance}" ) if theta_width < 0: raise ValueError( f"theta_width must be >=0, but got theta_width={theta_width}" ) if phi_width < 0: raise ValueError( f"phi_width must be >=0, but got phi_width={phi_width}" ) drop_rate = drop_rate if drop_rate else 0.0 if drop_rate > 1: raise ValueError( f"drop_rate must be <=1, but got drop_rate={drop_rate}" ) self._r_distance = r_distance self._theta_width = theta_width self._phi_width = phi_width keep_probability = 1 - drop_rate self._keep_probability = keep_probability self._exclude_classes = exclude_classes def get_config(self): return { "r_distance": self._r_distance, "theta_width": self._theta_width, "phi_width": self._phi_width, "drop_rate": 1 - self._keep_probability, "exclude_classes": self._exclude_classes, } def get_random_transformation(self, point_clouds, **kwargs): # Randomly select a point from the first frame as the center of the # frustum. 
valid_points = point_clouds[0, :, POINTCLOUD_LABEL_INDEX] > 0 num_valid_points = tf.math.reduce_sum(tf.cast(valid_points, tf.int32)) randomly_select_point_index = tf.random.uniform( (), minval=0, maxval=num_valid_points, dtype=tf.int32 ) randomly_select_frustum_center = tf.boolean_mask( point_clouds[0], valid_points, axis=0 )[randomly_select_point_index, :POINTCLOUD_LABEL_INDEX] num_frames, num_points, _ = point_clouds.get_shape().as_list() frustum_mask = [] for f in range(num_frames): frustum_mask.append( point_cloud.within_a_frustum( point_clouds[f], randomly_select_frustum_center, self._r_distance, self._theta_width, self._phi_width, )[tf.newaxis, :, tf.newaxis] ) frustum_mask = tf.concat(frustum_mask, axis=0) # Generate mask along point dimension. random_point_mask = ( random.uniform( [1, num_points, 1], minval=0.0, maxval=1, seed=self._random_generator, ) < self._keep_probability ) # Do not drop points outside the frustum mask. random_point_mask = tf.where(~frustum_mask, True, random_point_mask) return {"point_mask": random_point_mask} def augment_point_clouds_bounding_boxes( self, point_clouds, bounding_boxes, transformation, **kwargs ): point_mask = transformation["point_mask"] # Do not drop points that are protected by setting the corresponding # point_mask = 1.0. protected_points = tf.zeros_like(point_clouds[0, :, -1], dtype=tf.bool) for excluded_class in self._exclude_classes: protected_points |= point_clouds[0, :, -1] == excluded_class point_mask = tf.where( protected_points[tf.newaxis, :, tf.newaxis], True, point_mask ) point_clouds = tf.where(point_mask, point_clouds, 0.0) return (point_clouds, bounding_boxes)
keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_dropping_points.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_dropping_points.py", "repo_id": "keras-cv", "token_count": 2680 }
14
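A minimal NumPy sketch (not the layer's implementation; shapes and the frustum test are simplified stand-ins) of the masking rule built in `get_random_transformation` above: points inside the frustum survive with probability `1 - drop_rate`, points outside always survive, and dropped points have all features zeroed:

```python
import numpy as np

rng = np.random.default_rng(0)
num_points, drop_rate = 8, 0.7
keep_probability = 1.0 - drop_rate

inside_frustum = rng.uniform(size=num_points) < 0.5        # stand-in frustum test
random_keep = rng.uniform(size=num_points) < keep_probability
point_mask = np.where(~inside_frustum, True, random_keep)  # outside -> always keep

points = rng.normal(size=(num_points, 5)).astype("float32")
points = np.where(point_mask[:, None], points, 0.0)        # dropped points zeroed
print(point_mask)
```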
# Copyright 2022 Waymo LLC. # # Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501 import tensorflow as tf from keras_cv.api_export import keras_cv_export from keras_cv.backend import random from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d from keras_cv.ops import iou_3d from keras_cv.point_cloud import is_within_any_box3d POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES OBJECT_POINT_CLOUDS = base_augmentation_layer_3d.OBJECT_POINT_CLOUDS OBJECT_BOUNDING_BOXES = base_augmentation_layer_3d.OBJECT_BOUNDING_BOXES @keras_cv_export("keras_cv.layers.RandomCopyPaste") class RandomCopyPaste(base_augmentation_layer_3d.BaseAugmentationLayer3D): """A preprocessing layer which randomly pastes object point clouds and bounding boxes during training. This layer will randomly paste object point clouds and bounding boxes. OBJECT_POINT_CLOUDS and OBJECT_BOUNDING_BOXES are generated by running group_points_by_bounding_boxes function on additional input frames. We use the first frame to check overlap between existing bounding boxes and pasted bounding boxes. If a to-be-pasted bounding box overlaps with an existing bounding box and object point clouds, we do not paste the additional bounding box. We load 5 times max_paste_bounding_boxes to check overlap. If a to-be-pasted bounding box overlaps with existing background point clouds, we paste the additional bounding box and replace the background point clouds with object point clouds. Input shape: point_clouds: 3D (multi frames) float32 Tensor with shape [num of frames, num of points, num of point features]. The first 5 features are [x, y, z, class, range]. bounding_boxes: 3D (multi frames) float32 Tensor with shape [num of frames, num of boxes, num of box features]. Boxes are expected to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py Output shape: A tuple of two Tensors (point_clouds, bounding_boxes) with the same shape as input Tensors. Arguments: label_index: An optional int scalar sets the target object index. Bounding boxes and corresponding point clouds with box class == label_index will be saved as OBJECT_BOUNDING_BOXES and OBJECT_POINT_CLOUDS. If label index is None, all valid bounding boxes (box class !=0) are used. min_paste_bounding_boxes: A int scalar sets the min number of pasted bounding boxes. max_paste_bounding_boxes: A int scalar sets the max number of pasted bounding boxes. """ def __init__( self, label_index=None, min_paste_bounding_boxes=0, max_paste_bounding_boxes=10, **kwargs ): super().__init__(**kwargs) if label_index and label_index < 0: raise ValueError("label_index must be >=0.") if min_paste_bounding_boxes < 0: raise ValueError("min_paste_bounding_boxes must be >=0.") if max_paste_bounding_boxes < 0: raise ValueError("max_paste_bounding_boxes must be >=0.") if max_paste_bounding_boxes < min_paste_bounding_boxes: raise ValueError( "max_paste_bounding_boxes must be >= min_paste_bounding_boxes." 
) self._label_index = label_index self._min_paste_bounding_boxes = min_paste_bounding_boxes self._max_paste_bounding_boxes = max_paste_bounding_boxes def get_config(self): return { "label_index": self._label_index, "min_paste_bounding_boxes": self._min_paste_bounding_boxes, "max_paste_bounding_boxes": self._max_paste_bounding_boxes, } def get_random_transformation( self, point_clouds, bounding_boxes, object_point_clouds, object_bounding_boxes, **kwargs ): del point_clouds num_paste_bounding_boxes = random.uniform( (), minval=self._min_paste_bounding_boxes, maxval=self._max_paste_bounding_boxes, seed=self._random_generator, ) num_paste_bounding_boxes = tf.cast( num_paste_bounding_boxes, dtype=tf.int32 ) num_existing_bounding_boxes = tf.shape(bounding_boxes)[1] if self._label_index: object_mask = ( object_bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS] == self._label_index ) object_point_clouds = tf.boolean_mask( object_point_clouds, object_mask, axis=1 ) object_bounding_boxes = tf.boolean_mask( object_bounding_boxes, object_mask, axis=1 ) shuffle_index = tf.range(tf.shape(object_point_clouds)[1]) shuffle_index = tf.random.shuffle(shuffle_index) object_point_clouds = tf.gather( object_point_clouds, shuffle_index, axis=1 ) object_bounding_boxes = tf.gather( object_bounding_boxes, shuffle_index, axis=1 ) # Load at most 5 times num_paste_bounding_boxes to check overlaps. num_compare_bounding_boxes = tf.math.minimum( num_paste_bounding_boxes * 5, tf.shape(object_point_clouds)[1], ) object_point_clouds = object_point_clouds[ :, :num_compare_bounding_boxes, : ] object_bounding_boxes = object_bounding_boxes[ :, :num_compare_bounding_boxes, : ] # Use the current frame to check overlap between existing bounding boxes # and pasted bounding boxes all_bounding_boxes = tf.concat( [bounding_boxes, object_bounding_boxes], axis=1 )[0, :, :7] iou = iou_3d(all_bounding_boxes, all_bounding_boxes) iou = tf.linalg.band_part(iou, -1, 0) iou_sum = tf.reduce_sum(iou[num_existing_bounding_boxes:], axis=1) # A non overlapping bounding box has a 1.0 IoU with itself. non_overlapping_mask = tf.reshape(iou_sum <= 1, [-1]) object_point_clouds = tf.boolean_mask( object_point_clouds, non_overlapping_mask, axis=1 ) object_bounding_boxes = tf.boolean_mask( object_bounding_boxes, non_overlapping_mask, axis=1 ) object_point_clouds = object_point_clouds[ :, :num_paste_bounding_boxes, : ] object_bounding_boxes = object_bounding_boxes[ :, :num_paste_bounding_boxes, : ] return { OBJECT_POINT_CLOUDS: object_point_clouds, OBJECT_BOUNDING_BOXES: object_bounding_boxes, } def augment_point_clouds_bounding_boxes( self, point_clouds, bounding_boxes, transformation, **kwargs ): additional_object_point_clouds = transformation[OBJECT_POINT_CLOUDS] additional_object_bounding_boxes = transformation[OBJECT_BOUNDING_BOXES] original_point_clouds_shape = point_clouds.get_shape().as_list() original_object_bounding_boxes = bounding_boxes.get_shape().as_list() points_in_paste_bounding_boxes = is_within_any_box3d( point_clouds[..., :3], additional_object_bounding_boxes[..., :7] ) num_frames = point_clouds.get_shape().as_list()[0] point_clouds_list = [] bounding_boxes_list = [] for frame_index in range(num_frames): # Remove background point clouds that are in object_bounding_boxes. 
existing_point_clouds_mask = ~points_in_paste_bounding_boxes[ frame_index, : ] & tf.math.greater(point_clouds[frame_index, :, 3], 0.0) existing_point_clouds = tf.boolean_mask( point_clouds[frame_index], existing_point_clouds_mask, axis=0 ) paste_point_clouds = tf.boolean_mask( additional_object_point_clouds[frame_index], tf.math.greater( additional_object_point_clouds[frame_index, :, :, 3], 0.0 ), axis=0, ) point_clouds_list += [ tf.concat([paste_point_clouds, existing_point_clouds], axis=0) ] existing_bounding_boxes = tf.boolean_mask( bounding_boxes[frame_index], tf.math.greater( bounding_boxes[frame_index, :, CENTER_XYZ_DXDYDZ_PHI.CLASS], 0.0, ), ) paste_bounding_boxes = tf.boolean_mask( additional_object_bounding_boxes[frame_index], tf.math.greater( additional_object_bounding_boxes[ frame_index, :, CENTER_XYZ_DXDYDZ_PHI.CLASS ], 0.0, ), axis=0, ) bounding_boxes_list += [ tf.concat( [paste_bounding_boxes, existing_bounding_boxes], axis=0 ) ] point_clouds = tf.ragged.stack(point_clouds_list) bounding_boxes = tf.ragged.stack(bounding_boxes_list) return ( point_clouds.to_tensor(shape=original_point_clouds_shape), bounding_boxes.to_tensor(shape=original_object_bounding_boxes), ) def _augment(self, inputs): result = inputs point_clouds = inputs.get(POINT_CLOUDS, None) bounding_boxes = inputs.get(BOUNDING_BOXES, None) object_point_clouds = inputs.get(OBJECT_POINT_CLOUDS, None) object_bounding_boxes = inputs.get(OBJECT_BOUNDING_BOXES, None) transformation = self.get_random_transformation( point_clouds=point_clouds, bounding_boxes=bounding_boxes, object_point_clouds=object_point_clouds, object_bounding_boxes=object_bounding_boxes, ) point_clouds, bounding_boxes = self.augment_point_clouds_bounding_boxes( point_clouds, bounding_boxes=bounding_boxes, transformation=transformation, ) result.update( {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes} ) return result def call(self, inputs): # TODO(ianstenbit): Support the model input format. point_clouds = inputs[POINT_CLOUDS] bounding_boxes = inputs[BOUNDING_BOXES] if point_clouds.shape.rank == 3 and bounding_boxes.shape.rank == 3: return self._augment(inputs) elif point_clouds.shape.rank == 4 and bounding_boxes.shape.rank == 4: batch = point_clouds.get_shape().as_list()[0] point_clouds_list = [] bounding_boxes_list = [] for i in range(batch): no_batch_inputs = { POINT_CLOUDS: inputs[POINT_CLOUDS][i], BOUNDING_BOXES: inputs[BOUNDING_BOXES][i], OBJECT_POINT_CLOUDS: inputs[OBJECT_POINT_CLOUDS][i], OBJECT_BOUNDING_BOXES: inputs[OBJECT_BOUNDING_BOXES][i], } no_batch_result = self._augment(no_batch_inputs) point_clouds_list += [ no_batch_result[POINT_CLOUDS][tf.newaxis, ...] ] bounding_boxes_list += [ no_batch_result[BOUNDING_BOXES][tf.newaxis, ...] ] inputs[POINT_CLOUDS] = tf.concat(point_clouds_list, axis=0) inputs[BOUNDING_BOXES] = tf.concat(bounding_boxes_list, axis=0) return inputs else: raise ValueError( "Point clouds augmentation layers are expecting inputs " "point clouds and bounding boxes to be rank 3D (Frame, " "Point, Feature) or 4D (Batch, Frame, Point, Feature) " "tensors. Got shape: {} and {}".format( point_clouds.shape, bounding_boxes.shape ) )
keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_copy_paste.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_copy_paste.py", "repo_id": "keras-cv", "token_count": 5867 }
15
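The overlap filter in `get_random_transformation` above is compact but easy to misread; a hedged NumPy rendering of the same rule (lower-triangular IoU matrix, candidate rows summing to at most 1.0 only overlap themselves):

```python
import numpy as np

# Pairwise IoU over [existing boxes] + [candidate boxes to paste].
iou = np.array(
    [
        [1.0, 0.0, 0.6],  # existing box
        [0.0, 1.0, 0.0],  # candidate A: overlaps nothing -> safe to paste
        [0.6, 0.0, 1.0],  # candidate B: overlaps the existing box -> rejected
    ]
)
num_existing = 1

lower = np.tril(iou)  # mirrors tf.linalg.band_part(iou, -1, 0)
row_sums = lower[num_existing:].sum(axis=1)
non_overlapping = row_sums <= 1.0  # self-IoU contributes exactly 1.0
print(non_overlapping)  # [ True False]
```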
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from absl.testing import parameterized from tensorflow import keras from keras_cv import layers as cv_layers from keras_cv.backend.config import keras_3 from keras_cv.layers.vit_layers import PatchingAndEmbedding from keras_cv.tests.test_case import TestCase from keras_cv.utils import test_utils class SerializationTest(TestCase): @parameterized.named_parameters( ("AutoContrast", cv_layers.AutoContrast, {"value_range": (0, 255)}), ("ChannelShuffle", cv_layers.ChannelShuffle, {"seed": 1}), ("CutMix", cv_layers.CutMix, {"seed": 1}), ("Equalization", cv_layers.Equalization, {"value_range": (0, 255)}), ("Grayscale", cv_layers.Grayscale, {}), ("GridMask", cv_layers.GridMask, {"seed": 1}), ("MixUp", cv_layers.MixUp, {"seed": 1}), ("Mosaic", cv_layers.Mosaic, {"seed": 1}), ( "RepeatedAugmentation", cv_layers.RepeatedAugmentation, { "augmenters": [ cv_layers.RandAugment(value_range=(0, 1)), cv_layers.RandomFlip(), ] }, ), ( "RandomChannelShift", cv_layers.RandomChannelShift, {"value_range": (0, 255), "factor": 0.5}, ), ( "RandomTranslation", cv_layers.RandomTranslation, {"width_factor": (0, 0.5), "height_factor": 0.5}, ), ( "Posterization", cv_layers.Posterization, {"bits": 3, "value_range": (0, 255)}, ), ( "RandomColorDegeneration", cv_layers.RandomColorDegeneration, {"factor": 0.5, "seed": 1}, ), ( "RandomCutout", cv_layers.RandomCutout, {"height_factor": 0.2, "width_factor": 0.2, "seed": 1}, ), ( "RandomHue", cv_layers.RandomHue, {"factor": 0.5, "value_range": (0, 255), "seed": 1}, ), ( "RandomSaturation", cv_layers.RandomSaturation, {"factor": 0.5, "seed": 1}, ), ( "RandomSharpness", cv_layers.RandomSharpness, {"factor": 0.5, "value_range": (0, 255), "seed": 1}, ), ( "RandomShear", cv_layers.RandomShear, {"x_factor": 0.3, "x_factor": 0.3, "seed": 1}, ), ( "JitteredResize", cv_layers.JitteredResize, { "target_size": (640, 640), "scale_factor": (0.8, 1.25), "bounding_box_format": "xywh", }, ), ("Solarization", cv_layers.Solarization, {"value_range": (0, 255)}), ( "RandAugment", cv_layers.RandAugment, { "value_range": (0, 255), "magnitude": 0.5, "augmentations_per_image": 3, "rate": 0.3, "magnitude_stddev": 0.1, }, ), ( "RandomAugmentationPipeline", cv_layers.RandomAugmentationPipeline, { "layers": [ cv_layers.RandomSaturation(factor=0.5), cv_layers.RandomColorDegeneration(factor=0.5), ], "augmentations_per_image": 1, "rate": 1.0, }, ), ("RandomBrightness", cv_layers.RandomBrightness, {"factor": 0.5}), ( "RandomChoice", cv_layers.RandomChoice, {"layers": [], "seed": 3, "auto_vectorize": False}, ), ( "RandomColorJitter", cv_layers.RandomColorJitter, { "value_range": (0, 255), "brightness_factor": (-0.2, 0.5), "contrast_factor": (0.5, 0.9), "saturation_factor": (0.5, 0.9), "hue_factor": (0.5, 0.9), "seed": 1, }, ), ( "RandomContrast", cv_layers.RandomContrast, {"value_range": (0, 255), "factor": 0.5}, ), ( "RandomCropAndResize", cv_layers.RandomCropAndResize, { "target_size": (224, 224), "crop_area_factor": (0.8, 
1.0), "aspect_ratio_factor": (3 / 4, 4 / 3), }, ), ( "DropBlock2D", cv_layers.DropBlock2D, {"rate": 0.1, "block_size": (7, 7), "seed": 1234}, ), ( "StochasticDepth", cv_layers.StochasticDepth, {"rate": 0.1}, ), ( "SqueezeAndExcite2D", cv_layers.SqueezeAndExcite2D, { "filters": 16, "bottleneck_filters": 4, "squeeze_activation": keras.layers.ReLU(), "excite_activation": keras.activations.relu, }, ), ( "DropPath", cv_layers.DropPath, { "rate": 0.2, }, ), ( "RandomApply", cv_layers.RandomApply, { "rate": 0.5, "layer": None, "seed": 1234, }, ), ( "RandomJpegQuality", cv_layers.RandomJpegQuality, {"factor": (75, 100)}, ), ( "AugMix", cv_layers.AugMix, { "value_range": (0, 255), "severity": 0.3, "num_chains": 3, "chain_depth": -1, "alpha": 1.0, "seed": 1, }, ), ( "RandomRotation", cv_layers.RandomRotation, { "factor": 0.5, }, ), ( "RandomAspectRatio", cv_layers.RandomAspectRatio, { "factor": (0.9, 1.1), "seed": 1233, }, ), ( "SpatialPyramidPooling", cv_layers.SpatialPyramidPooling, { "dilation_rates": [6, 12, 18], "num_channels": 256, "activation": "relu", "dropout": 0.1, }, ), ( "PatchingAndEmbedding", PatchingAndEmbedding, {"project_dim": 128, "patch_size": 16}, ), ( "TransformerEncoder", cv_layers.TransformerEncoder, { "project_dim": 128, "num_heads": 2, "mlp_dim": 128, "mlp_dropout": 0.1, "attention_dropout": 0.1, "activation": "gelu", "layer_norm_epsilon": 1e-06, }, ), ( "FrustumRandomDroppingPoints", cv_layers.FrustumRandomDroppingPoints, { "r_distance": 10.0, "theta_width": 1.0, "phi_width": 2.0, "drop_rate": 0.1, }, ), ( "FrustumRandomPointFeatureNoise", cv_layers.FrustumRandomPointFeatureNoise, { "r_distance": 10.0, "theta_width": 1.0, "phi_width": 2.0, "max_noise_level": 0.1, }, ), ( "GlobalRandomDroppingPoints", cv_layers.GlobalRandomDroppingPoints, {"drop_rate": 0.1}, ), ( "GlobalRandomFlip", cv_layers.GlobalRandomFlip, {}, ), ( "GlobalRandomRotation", cv_layers.GlobalRandomRotation, { "max_rotation_angle_x": 0.5, "max_rotation_angle_y": 0.6, "max_rotation_angle_z": 0.7, }, ), ( "GlobalRandomScaling", cv_layers.GlobalRandomScaling, { "x_factor": (0.2, 1.0), "y_factor": (0.3, 1.1), "z_factor": (0.4, 1.3), "preserve_aspect_ratio": False, }, ), ( "GlobalRandomTranslation", cv_layers.GlobalRandomTranslation, {"x_stddev": 0.2, "y_stddev": 1.0, "z_stddev": 0.0}, ), ( "GroupPointsByBoundingBoxes", cv_layers.GroupPointsByBoundingBoxes, { "label_index": 1, "min_points_per_bounding_boxes": 1, "max_points_per_bounding_boxes": 4, }, ), ( "RandomCopyPaste", cv_layers.RandomCopyPaste, { "label_index": 1, "min_paste_bounding_boxes": 1, "max_paste_bounding_boxes": 10, }, ), ( "RandomDropBox", cv_layers.RandomDropBox, {"label_index": 1, "max_drop_bounding_boxes": 3}, ), ( "SwapBackground", cv_layers.SwapBackground, {}, ), ( "RandomZoom", cv_layers.RandomZoom, {"height_factor": 0.2, "width_factor": 0.5}, ), ( "RandomCrop", cv_layers.RandomCrop, { "height": 100, "width": 200, }, ), ( "MBConvBlock", cv_layers.MBConvBlock, { "input_filters": 16, "output_filters": 16, }, ), ( "FusedMBConvBlock", cv_layers.FusedMBConvBlock, { "input_filters": 16, "output_filters": 16, }, ), ( "Rescaling", cv_layers.Rescaling, { "scale": 1, "offset": 0.5, }, ), ( "MultiClassNonMaxSuppression", cv_layers.MultiClassNonMaxSuppression, { "bounding_box_format": "yxyx", "from_logits": True, }, ), ( "NonMaxSuppression", cv_layers.NonMaxSuppression, { "bounding_box_format": "yxyx", "from_logits": True, }, ), ) def test_layer_serialization(self, layer_cls, init_args): # TODO: Some layers are not yet compatible with Keras 3. 
if keras_3: skip_layers = [ cv_layers.DropBlock2D, cv_layers.FrustumRandomDroppingPoints, cv_layers.FrustumRandomPointFeatureNoise, cv_layers.GlobalRandomDroppingPoints, cv_layers.GlobalRandomFlip, cv_layers.GlobalRandomRotation, cv_layers.GlobalRandomScaling, cv_layers.GlobalRandomTranslation, cv_layers.GroupPointsByBoundingBoxes, cv_layers.RandomCopyPaste, cv_layers.RandomDropBox, cv_layers.SwapBackground, cv_layers.SqueezeAndExcite2D, # TODO: Fails in Keras 3 ] if layer_cls in skip_layers: self.skipTest("Not supported on Keras 3") layer = layer_cls(**init_args) config = layer.get_config() self.assertAllInitParametersAreInConfig(layer_cls, config) model = keras.models.Sequential([layer]) model_config = model.get_config() reconstructed_model = keras.Sequential().from_config(model_config) reconstructed_layer = reconstructed_model.layers[0] self.assertTrue( test_utils.config_equals( layer.get_config(), reconstructed_layer.get_config() ) ) def assertAllInitParametersAreInConfig(self, layer_cls, config): excluded_name = ["args", "kwargs", "*"] parameter_names = { v for v in inspect.signature(layer_cls).parameters.keys() if v not in excluded_name } intersection_with_config = { v for v in config.keys() if v in parameter_names } self.assertSetEqual(parameter_names, intersection_with_config)
keras-cv/keras_cv/layers/serialization_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/serialization_test.py", "repo_id": "keras-cv", "token_count": 7877 }
16
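A hedged, stand-alone version of the round-trip that the parameterized test above automates for every layer, with one concrete layer picked for illustration:

```python
from tensorflow import keras

from keras_cv import layers as cv_layers

layer = cv_layers.Solarization(value_range=(0, 255))
config = layer.get_config()

# Serialize through a Sequential wrapper and rebuild, as the test does.
model = keras.models.Sequential([layer])
reconstructed = keras.Sequential().from_config(model.get_config())
reconstructed_layer = reconstructed.layers[0]

# The reconstructed layer should carry the same constructor arguments.
print(config["value_range"], reconstructed_layer.get_config()["value_range"])
```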
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings

from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops


@keras_cv_export("keras_cv.losses.GIoULoss")
class GIoULoss(keras.losses.Loss):
    """Implements the Generalized IoU Loss

    GIoU loss is a modified IoU loss commonly used for object detection. This
    loss aims to directly optimize the IoU score between true boxes and
    predicted boxes. GIoU loss adds a penalty term to the IoU loss that takes
    into account the area of the smallest box enclosing both the boxes being
    considered for the IoU. The length of the last dimension should be 4 to
    represent the bounding boxes.

    Args:
        bounding_box_format: a case-insensitive string (for example, "xyxy").
            Each bounding box is defined by these 4 values. For detailed
            information on the supported formats, see the
            [KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
        axis: the axis along which to average the IoUs, defaults to -1.

    References:
        - [GIoU paper](https://arxiv.org/pdf/1902.09630)
        - [TFAddons Implementation](https://www.tensorflow.org/addons/api_docs/python/tfa/losses/GIoULoss)

    Sample Usage:
    ```python
    y_true = np.random.uniform(size=(5, 10, 4), low=0, high=10)
    y_pred = np.random.uniform(size=(5, 10, 4), low=0, high=10)
    loss = GIoULoss(bounding_box_format="xywh")
    loss(y_true, y_pred).numpy()
    ```

    Usage with the `compile()` API:
    ```python
    model.compile(optimizer='adam', loss=keras_cv.losses.GIoULoss())
    ```
    """  # noqa: E501

    def __init__(self, bounding_box_format, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.bounding_box_format = bounding_box_format
        self.axis = axis

    def _compute_enclosure(self, boxes1, boxes2):
        y_min1, x_min1, y_max1, x_max1 = ops.split(boxes1[..., :4], 4, axis=-1)
        y_min2, x_min2, y_max2, x_max2 = ops.split(boxes2[..., :4], 4, axis=-1)
        boxes2_rank = len(boxes2.shape)
        perm = [1, 0] if boxes2_rank == 2 else [0, 2, 1]
        # [N, M] or [batch_size, N, M]
        zeros_t = ops.cast(0, boxes1.dtype)

        enclose_ymin = ops.minimum(y_min1, ops.transpose(y_min2, perm))
        enclose_xmin = ops.minimum(x_min1, ops.transpose(x_min2, perm))
        enclose_ymax = ops.maximum(y_max1, ops.transpose(y_max2, perm))
        enclose_xmax = ops.maximum(x_max1, ops.transpose(x_max2, perm))
        enclose_width = ops.maximum(zeros_t, enclose_xmax - enclose_xmin)
        enclose_height = ops.maximum(zeros_t, enclose_ymax - enclose_ymin)
        enclose_area = enclose_width * enclose_height

        return enclose_area

    def _compute_giou(self, boxes1, boxes2):
        boxes1_rank = len(boxes1.shape)
        boxes2_rank = len(boxes2.shape)

        if boxes1_rank not in [2, 3]:
            raise ValueError(
                "compute_iou() expects boxes1 to be batched, or to be "
                f"unbatched. Received len(boxes1.shape)={boxes1_rank}, "
                f"len(boxes2.shape)={boxes2_rank}. Expected either "
                "len(boxes1.shape)=2 or len(boxes1.shape)=3."
            )
        if boxes2_rank not in [2, 3]:
            raise ValueError(
                "compute_iou() expects boxes2 to be batched, or to be "
                f"unbatched. Received len(boxes1.shape)={boxes1_rank}, "
                f"len(boxes2.shape)={boxes2_rank}. Expected either "
                "len(boxes2.shape)=2 or len(boxes2.shape)=3."
            )

        target_format = "yxyx"
        if bounding_box.is_relative(self.bounding_box_format):
            target_format = bounding_box.as_relative(target_format)

        boxes1 = bounding_box.convert_format(
            boxes1, source=self.bounding_box_format, target=target_format
        )

        boxes2 = bounding_box.convert_format(
            boxes2, source=self.bounding_box_format, target=target_format
        )

        intersect_area = bounding_box.iou._compute_intersection(boxes1, boxes2)
        boxes1_area = bounding_box.iou._compute_area(boxes1)
        boxes2_area = bounding_box.iou._compute_area(boxes2)
        boxes2_area_rank = len(boxes2_area.shape)
        boxes2_axis = 1 if (boxes2_area_rank == 2) else 0
        boxes1_area = ops.expand_dims(boxes1_area, axis=-1)
        boxes2_area = ops.expand_dims(boxes2_area, axis=boxes2_axis)
        union_area = boxes1_area + boxes2_area - intersect_area
        iou = ops.divide(intersect_area, union_area + keras.backend.epsilon())

        # giou calculation
        enclose_area = self._compute_enclosure(boxes1, boxes2)

        return iou - ops.divide(
            (enclose_area - union_area), enclose_area + keras.backend.epsilon()
        )

    def call(self, y_true, y_pred, sample_weight=None):
        if sample_weight is not None:
            raise ValueError(
                "GIoULoss does not support sample_weight. Please ensure "
                f"sample_weight=None. Got sample_weight={sample_weight}"
            )

        y_pred = ops.convert_to_tensor(y_pred)
        y_true = ops.cast(y_true, y_pred.dtype)

        if y_pred.shape[-1] != 4:
            raise ValueError(
                "GIoULoss expects y_pred.shape[-1] to be 4 to represent the "
                f"bounding boxes. Received y_pred.shape[-1]={y_pred.shape[-1]}."
            )

        if y_true.shape[-1] != 4:
            raise ValueError(
                "GIoULoss expects y_true.shape[-1] to be 4 to represent the "
                f"bounding boxes. Received y_true.shape[-1]={y_true.shape[-1]}."
            )

        if y_true.shape[-2] != y_pred.shape[-2]:
            raise ValueError(
                "GIoULoss expects number of boxes in y_pred to be equal to the "
                "number of boxes in y_true. Received number of boxes in "
                f"y_true={y_true.shape[-2]} and number of boxes in "
                f"y_pred={y_pred.shape[-2]}."
            )

        giou = self._compute_giou(y_true, y_pred)
        giou = ops.diagonal(
            giou,
        )
        if self.axis == "no_reduction":
            warnings.warn(
                "`axis='no_reduction'` is a temporary API, and the API "
                "contract will be replaced in the future with a more generic "
                "solution covering all losses."
            )
        else:
            giou = ops.mean(giou, axis=self.axis)

        return 1 - giou

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "bounding_box_format": self.bounding_box_format,
                "axis": self.axis,
            }
        )
        return config
keras-cv/keras_cv/losses/giou_loss.py/0
{ "file_path": "keras-cv/keras_cv/losses/giou_loss.py", "repo_id": "keras-cv", "token_count": 3367 }
17
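To make the `_compute_giou` term above concrete, here is a single-pair walk-through with a hypothetical plain-Python helper (`xyxy` boxes, no batching, no epsilon):

```python
def giou_xyxy(box_a, box_b):
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b

    inter_w = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    inter_h = max(0.0, min(ay2, by2) - max(ay1, by1))
    intersection = inter_w * inter_h
    union = (
        (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - intersection
    )
    iou = intersection / union

    # Area of the smallest axis-aligned box enclosing both boxes.
    enclose = (max(ax2, bx2) - min(ax1, bx1)) * (max(ay2, by2) - min(ay1, by1))
    return iou - (enclose - union) / enclose

# IoU = 1/7, enclosing area = 9, so GIoU ~= -0.079 and the loss is ~1.079.
print(giou_xyxy([0, 0, 2, 2], [1, 1, 3, 3]))
```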
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import types import numpy as np import tensorflow as tf import tensorflow.keras as keras from keras_cv import bounding_box from keras_cv.api_export import keras_cv_export from keras_cv.backend import ops from keras_cv.metrics import coco class HidePrints: """A basic internal only context manager to hide print statements.""" def __enter__(self): self._original_stdout = sys.stdout sys.stdout = open(os.devnull, "w") def __exit__(self, exc_type, exc_val, exc_tb): sys.stdout.close() sys.stdout = self._original_stdout def _box_concat(boxes): """Concatenates two bounding box batches together.""" result = {} for key in ["boxes", "classes"]: result[key] = tf.concat([b[key] for b in boxes], axis=0) if len(boxes) != 0 and "confidence" in boxes[0]: result["confidence"] = tf.concat( [b["confidence"] for b in boxes], axis=0 ) return result METRIC_NAMES = [ "AP", "AP50", "AP75", "APs", "APm", "APl", "ARmax1", "ARmax10", "ARmax100", "ARs", "ARm", "ARl", ] METRIC_MAPPING = { "AP": "MaP", "AP50": "MaP@[IoU=50]", "AP75": "MaP@[IoU=75]", "APs": "MaP@[area=small]", "APm": "MaP@[area=medium]", "APl": "MaP@[area=large]", "ARmax1": "Recall@[max_detections=1]", "ARmax10": "Recall@[max_detections=10]", "ARmax100": "Recall@[max_detections=100]", "ARs": "Recall@[area=small]", "ARm": "Recall@[area=medium]", "ARl": "Recall@[area=large]", } @keras_cv_export("keras_cv.metrics.BoxCOCOMetrics") class BoxCOCOMetrics(keras.metrics.Metric): """BoxCOCOMetrics computes standard object detection metrics. Args: bounding_box_format: the bounding box format for inputs. evaluate_freq: the number of steps to run before each evaluation. Due to the high computational cost of metric evaluation the final results are only updated once every `evaluate_freq` steps. Higher values will allow for faster training times, while lower numbers allow for higher numerical precision in metric reporting. Usage: `BoxCOCOMetrics()` can be used like any standard metric with any KerasCV object detection model. Inputs to `y_true` must be KerasCV bounding box dictionaries, `{"classes": classes, "boxes": boxes}`, and `y_pred` must follow the same format with an additional `confidence` key. Unfortunately, at the moment `BoxCOCOMetrics()` are not TPU compatible with the `fit()` API. If you wish to evaluate `BoxCOCOMetrics()` for a model trained on TPU, we recommend using the `model.predict()` API and manually updating the metric state with the results. 
Using this metric suite alongside a model is trivial; simply provide it to the `compile()` arguments of the model: ```python images = tf.ones(shape=(1, 512, 512, 3)) labels = { "boxes": [ [ [0, 0, 100, 100], [100, 100, 200, 200], [300, 300, 400, 400], ] ], "classes": [[1, 1, 1]], } model = keras_cv.models.RetinaNet( num_classes=20, bounding_box_format="xywh", ) # Evaluate model model(images) # Train model model.compile( classification_loss='focal', box_loss='smoothl1', optimizer=tf.optimizers.SGD(global_clipnorm=10.0), metrics=[keras_cv.metrics.BoxCOCOMetrics('xywh')] ) model.fit(images, labels) ``` """ def __init__(self, bounding_box_format, evaluate_freq, name=None, **kwargs): if "dtype" not in kwargs: kwargs["dtype"] = "float32" super().__init__(name=name, **kwargs) self.ground_truths = [] self.predictions = [] self.bounding_box_format = bounding_box_format self.evaluate_freq = evaluate_freq self._eval_step_count = 0 self._cached_result = [0] * len(METRIC_NAMES) def __new__(cls, *args, **kwargs): obj = super(keras.metrics.Metric, cls).__new__(cls) # Wrap the update_state function in a py_function and scope it to /cpu:0 obj_update_state = obj.update_state def update_state_on_cpu( y_true_boxes, y_true_classes, y_pred_boxes, y_pred_classes, y_pred_confidence, sample_weight=None, ): y_true = {"boxes": y_true_boxes, "classes": y_true_classes} y_pred = { "boxes": y_pred_boxes, "classes": y_pred_classes, "confidence": y_pred_confidence, } with tf.device("/cpu:0"): return obj_update_state(y_true, y_pred, sample_weight) obj.update_state_on_cpu = update_state_on_cpu def update_state_fn(self, y_true, y_pred, sample_weight=None): y_true_boxes = y_true["boxes"] y_true_classes = y_true["classes"] y_pred_boxes = y_pred["boxes"] y_pred_classes = y_pred["classes"] y_pred_confidence = y_pred["confidence"] eager_inputs = [ y_true_boxes, y_true_classes, y_pred_boxes, y_pred_classes, y_pred_confidence, ] if sample_weight is not None: eager_inputs.append(sample_weight) return tf.py_function( func=self.update_state_on_cpu, inp=eager_inputs, Tout=[] ) obj.update_state = types.MethodType(update_state_fn, obj) # Wrap the result function in a py_function and scope it to /cpu:0 obj_result = obj.result def result_on_host_cpu(force): with tf.device("/cpu:0"): # Without the call to `constant` `tf.py_function` selects the # first index automatically and just returns obj_result()[0] return tf.constant(obj_result(force), obj.dtype) obj.result_on_host_cpu = result_on_host_cpu def result_fn(self, force=False): py_func_result = tf.py_function( self.result_on_host_cpu, inp=[force], Tout=obj.dtype ) result = {} for i, key in enumerate(METRIC_NAMES): result[self.name_prefix() + METRIC_MAPPING[key]] = ( py_func_result[i] ) return result obj.result = types.MethodType(result_fn, obj) return obj def name_prefix(self): if self.name.startswith("box_coco_metrics"): return "" return self.name + "_" def update_state(self, y_true, y_pred, sample_weight=None): self._eval_step_count += 1 if isinstance(y_true["boxes"], tf.RaggedTensor) != isinstance( y_pred["boxes"], tf.RaggedTensor ): # Make sure we have same ragged/dense status for y_true and y_pred y_true = bounding_box.to_dense(y_true) y_pred = bounding_box.to_dense(y_pred) self.ground_truths.append(y_true) self.predictions.append(y_pred) # Compute on first step, so we don't have an inconsistent list of # metrics in our train_step() results. This will just populate the # metrics with `0.0` until we get to `evaluate_freq`. 
if self._eval_step_count % self.evaluate_freq == 0: self._cached_result = self._compute_result() def reset_state(self): self.ground_truths = [] self.predictions = [] self._eval_step_count = 0 self._cached_result = [0] * len(METRIC_NAMES) def result(self, force=False): if force: self._cached_result = self._compute_result() return self._cached_result def _compute_result(self): if len(self.predictions) == 0 or len(self.ground_truths) == 0: return dict([(key, 0) for key in METRIC_NAMES]) with HidePrints(): metrics = compute_pycocotools_metric( _box_concat(self.ground_truths), _box_concat(self.predictions), self.bounding_box_format, ) results = [] for key in METRIC_NAMES: # Workaround for the state where there are 0 boxes in a category. results.append(max(metrics[key], 0.0)) return results def compute_pycocotools_metric(y_true, y_pred, bounding_box_format): y_true = bounding_box.to_dense(y_true) y_pred = bounding_box.to_dense(y_pred) box_pred = y_pred["boxes"] cls_pred = y_pred["classes"] confidence_pred = y_pred["confidence"] gt_boxes = y_true["boxes"] gt_classes = y_true["classes"] box_pred = bounding_box.convert_format( box_pred, source=bounding_box_format, target="yxyx" ) gt_boxes = bounding_box.convert_format( gt_boxes, source=bounding_box_format, target="yxyx" ) total_images = gt_boxes.shape[0] source_ids = np.char.mod("%d", np.linspace(1, total_images, total_images)) ground_truth = {} ground_truth["source_id"] = [source_ids] ground_truth["num_detections"] = [ ops.sum(ops.cast(y_true["classes"] >= 0, "int32"), axis=-1) ] ground_truth["boxes"] = [ops.convert_to_numpy(gt_boxes)] ground_truth["classes"] = [ops.convert_to_numpy(gt_classes)] predictions = {} predictions["source_id"] = [source_ids] predictions["detection_boxes"] = [ops.convert_to_numpy(box_pred)] predictions["detection_classes"] = [ops.convert_to_numpy(cls_pred)] predictions["detection_scores"] = [ops.convert_to_numpy(confidence_pred)] predictions["num_detections"] = [ ops.sum(ops.cast(confidence_pred > 0, "int32"), axis=-1) ] return coco.compute_pycoco_metrics(ground_truth, predictions)
keras-cv/keras_cv/metrics/object_detection/box_coco_metrics.py/0
{ "file_path": "keras-cv/keras_cv/metrics/object_detection/box_coco_metrics.py", "repo_id": "keras-cv", "token_count": 4743 }
18
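A hedged sketch of the manual-update path the docstring above recommends when `fit()` cannot be used (for example on TPU). The boxes are made up, `pycocotools` must be installed, and the exact metric values depend on the COCO evaluation settings:

```python
import numpy as np

import keras_cv

metric = keras_cv.metrics.BoxCOCOMetrics(
    bounding_box_format="xywh", evaluate_freq=1
)

y_true = {
    "boxes": np.array([[[10.0, 10.0, 50.0, 50.0]]], dtype="float32"),
    "classes": np.array([[1.0]], dtype="float32"),
}
y_pred = {
    "boxes": np.array([[[10.0, 10.0, 50.0, 50.0]]], dtype="float32"),
    "classes": np.array([[1.0]], dtype="float32"),
    "confidence": np.array([[0.9]], dtype="float32"),
}

metric.update_state(y_true, y_pred)
print(metric.result(force=True))  # dict of MaP / Recall entries
```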
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DenseNet backbone model. Reference: - [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993) - [Based on the Original keras.applications DenseNet](https://github.com/keras-team/keras/blob/master/keras/applications/densenet.py) """ # noqa: E501 import copy from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.models import utils from keras_cv.models.backbones.backbone import Backbone from keras_cv.models.backbones.densenet.densenet_backbone_presets import ( backbone_presets, ) from keras_cv.models.backbones.densenet.densenet_backbone_presets import ( backbone_presets_with_weights, ) from keras_cv.utils.python_utils import classproperty BN_AXIS = 3 BN_EPSILON = 1.001e-5 @keras_cv_export("keras_cv.models.DenseNetBackbone") class DenseNetBackbone(Backbone): """Instantiates the DenseNet architecture. Args: stackwise_num_repeats: list of ints, number of repeated convolutional blocks per dense block. include_rescaling: bool, whether to rescale the inputs. If set to `True`, inputs will be passed through a `Rescaling(1/255.0)` layer. input_shape: optional shape tuple, defaults to (None, None, 3). input_tensor: optional Keras tensor (i.e. output of `keras.layers.Input()`) to use as image input for the model. compression_ratio: float, compression rate at transition layers. growth_rate: int, number of filters added by each dense block. 
Examples: ```python input_data = tf.ones(shape=(8, 224, 224, 3)) # Pretrained backbone model = keras_cv.models.DenseNetBackbone.from_preset("densenet121_imagenet") output = model(input_data) # Randomly initialized backbone with a custom config model = DenseNetBackbone( stackwise_num_repeats=[6, 12, 24, 16], include_rescaling=False, ) output = model(input_data) ``` """ # noqa: E501 def __init__( self, *, stackwise_num_repeats, include_rescaling, input_shape=(None, None, 3), input_tensor=None, compression_ratio=0.5, growth_rate=32, **kwargs, ): inputs = utils.parse_model_inputs(input_shape, input_tensor) x = inputs if include_rescaling: x = keras.layers.Rescaling(1 / 255.0)(x) x = keras.layers.Conv2D( 64, 7, strides=2, use_bias=False, padding="same", name="conv1_conv" )(x) x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name="conv1_bn" )(x) x = keras.layers.Activation("relu", name="conv1_relu")(x) x = keras.layers.MaxPooling2D( 3, strides=2, padding="same", name="pool1" )(x) pyramid_level_inputs = {} for stack_index in range(len(stackwise_num_repeats) - 1): index = stack_index + 2 x = apply_dense_block( x, stackwise_num_repeats[stack_index], growth_rate, name=f"conv{index}", ) pyramid_level_inputs[f"P{index}"] = utils.get_tensor_input_name(x) x = apply_transition_block( x, compression_ratio, name=f"pool{index}" ) x = apply_dense_block( x, stackwise_num_repeats[-1], growth_rate, name=f"conv{len(stackwise_num_repeats) + 1}", ) pyramid_level_inputs[f"P{len(stackwise_num_repeats) + 1}"] = ( utils.get_tensor_input_name(x) ) x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name="bn" )(x) x = keras.layers.Activation("relu", name="relu")(x) # Create model. super().__init__(inputs=inputs, outputs=x, **kwargs) # All references to `self` below this line self.pyramid_level_inputs = pyramid_level_inputs self.stackwise_num_repeats = stackwise_num_repeats self.include_rescaling = include_rescaling self.input_tensor = input_tensor self.compression_ratio = compression_ratio self.growth_rate = growth_rate def get_config(self): config = super().get_config() config.update( { "stackwise_num_repeats": self.stackwise_num_repeats, "include_rescaling": self.include_rescaling, # Remove batch dimension from `input_shape` "input_shape": self.input_shape[1:], "input_tensor": self.input_tensor, "compression_ratio": self.compression_ratio, "growth_rate": self.growth_rate, } ) return config @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return copy.deepcopy(backbone_presets) @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" # noqa: E501 return copy.deepcopy(backbone_presets_with_weights) def apply_dense_block(x, num_repeats, growth_rate, name=None): """A dense block. Args: x: input tensor. num_repeats: int, number of repeated convolutional blocks. growth_rate: int, number of filters added by each dense block. name: string, block label. """ if name is None: name = f"dense_block_{keras.backend.get_uid('dense_block')}" for i in range(num_repeats): x = apply_conv_block(x, growth_rate, name=f"{name}_block_{i}") return x def apply_transition_block(x, compression_ratio, name=None): """A transition block. Args: x: input tensor. compression_ratio: float, compression rate at transition layers. name: string, block label. 
""" if name is None: name = f"transition_block_{keras.backend.get_uid('transition_block')}" x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=f"{name}_bn" )(x) x = keras.layers.Activation("relu", name=f"{name}_relu")(x) x = keras.layers.Conv2D( int(x.shape[BN_AXIS] * compression_ratio), 1, use_bias=False, name=f"{name}_conv", )(x) x = keras.layers.AveragePooling2D(2, strides=2, name=f"{name}_pool")(x) return x def apply_conv_block(x, growth_rate, name=None): """A building block for a dense block. Args: x: input tensor. growth_rate: int, number of filters added by each dense block. name: string, block label. """ if name is None: name = f"conv_block_{keras.backend.get_uid('conv_block')}" shortcut = x x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=f"{name}_0_bn" )(x) x = keras.layers.Activation("relu", name=f"{name}_0_relu")(x) x = keras.layers.Conv2D( 4 * growth_rate, 1, use_bias=False, name=f"{name}_1_conv" )(x) x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=f"{name}_1_bn" )(x) x = keras.layers.Activation("relu", name=f"{name}_1_relu")(x) x = keras.layers.Conv2D( growth_rate, 3, padding="same", use_bias=False, name=f"{name}_2_conv", )(x) x = keras.layers.Concatenate(axis=BN_AXIS, name=f"{name}_concat")( [shortcut, x] ) return x
keras-cv/keras_cv/models/backbones/densenet/densenet_backbone.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_backbone.py", "repo_id": "keras-cv", "token_count": 3671 }
19
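The `pyramid_level_inputs` dictionary built above is what downstream detection models consume. Below is a sketch of pulling those intermediate feature maps out of a randomly initialized backbone, reusing the `get_feature_extractor` helper that appears later in this collection; the levels and layer names come straight from the constructor logic above.

```python
import numpy as np
from keras_cv.models import DenseNetBackbone
from keras_cv.utils.train import get_feature_extractor

# DenseNet-121-style configuration, randomly initialized.
backbone = DenseNetBackbone(
    stackwise_num_repeats=[6, 12, 24, 16],
    include_rescaling=False,
)

# Maps "P2".."P5" to the internal layer names recorded during construction.
levels = sorted(backbone.pyramid_level_inputs.keys())
extractor = get_feature_extractor(
    backbone,
    layer_names=[backbone.pyramid_level_inputs[level] for level in levels],
    output_keys=levels,
)

features = extractor(np.ones((1, 224, 224, 3), dtype="float32"))
for level, feature_map in features.items():
    print(level, feature_map.shape)
```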
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for loading pretrained model presets.""" import numpy as np import pytest from keras_cv.backend import ops from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone import ( MobileNetV3Backbone, ) from keras_cv.tests.test_case import TestCase @pytest.mark.large class MobileNetV3PresetSmokeTest(TestCase): """ A smoke test for MobileNetV3 presets we run continuously. This only tests the smallest weights we have available. Run with: `pytest keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets_test.py --run_large` """ # noqa: E501 def setUp(self): self.input_batch = np.ones(shape=(8, 224, 224, 3)) def test_backbone_output(self): model = MobileNetV3Backbone.from_preset("mobilenet_v3_small_imagenet") outputs = model(self.input_batch) # The forward pass from a preset should be stable! # This test should catch cases where we unintentionally change our # network code in a way that would invalidate our preset weights. # We should only update these numbers if we are updating a weights # file, or have found a discrepancy with the upstream source. outputs = outputs[0, 0, 0, :5] expected = [0.25, 1.13, -0.26, 0.10, 0.03] # Keep a high tolerance, so we are robust to different hardware. self.assertAllClose( ops.convert_to_numpy(outputs), expected, atol=0.01, rtol=0.01 ) @pytest.mark.extra_large class MobileNetV3PresetFullTest(TestCase): """ Test the full enumeration of our preset. This tests every preset for MobileNetV3 and is only run manually. Run with: `pytest keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets_test.py --run_extra_large` """ # noqa: E501 def test_load_mobilenet_v3(self): input_data = np.ones(shape=(2, 224, 224, 3)) for preset in MobileNetV3Backbone.presets: model = MobileNetV3Backbone.from_preset(preset) model(input_data)
keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets_test.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets_test.py", "repo_id": "keras-cv", "token_count": 960 }
20
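The golden-value check above can also be reproduced outside of pytest. A sketch, using the same import path as the test and assuming the preset weights are downloadable; the expected activations are copied from the test itself.

```python
import numpy as np
from keras_cv.backend import ops
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone import (
    MobileNetV3Backbone,
)

# Preset names only; nothing is downloaded until `from_preset` is called.
print(list(MobileNetV3Backbone.presets.keys()))

model = MobileNetV3Backbone.from_preset("mobilenet_v3_small_imagenet")
outputs = model(np.ones(shape=(8, 224, 224, 3)))[0, 0, 0, :5]

# Same tolerance as the smoke test, so the check is robust to hardware.
np.testing.assert_allclose(
    ops.convert_to_numpy(outputs),
    [0.25, 1.13, -0.26, 0.10, 0.03],
    atol=0.01,
    rtol=0.01,
)
```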
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras import layers from keras_cv.models import utils from keras_cv.models.backbones.backbone import Backbone class VGG16Backbone(Backbone): """ Reference: - [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556) (ICLR 2015) This class represents Keras Backbone of VGG16 model. Args: include_rescaling: bool, whether to rescale the inputs. If set to True, inputs will be passed through a `Rescaling(1/255.0)` layer. include_top: bool, whether to include the 3 fully-connected layers at the top of the network. If provided, num_classes must be provided. num_classes: int, optional number of classes to classify images into, only to be specified if `include_top` is True. input_shape: tuple, optional shape tuple, defaults to (224, 224, 3). input_tensor: Tensor, optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. pooling: bool, Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classifier_activation:`str` or callable. The activation function to use on the "top" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the "top" layer. When loading pretrained weights, `classifier_activation` can only be `None` or `"softmax"`. name: (Optional) name to pass to the model, defaults to "VGG16". Returns: A `keras.Model` instance. """ # noqa: E501 def __init__( self, include_rescaling, include_top, input_tensor=None, num_classes=None, input_shape=(224, 224, 3), pooling=None, classifier_activation="softmax", name="VGG16", **kwargs, ): if include_top and num_classes is None: raise ValueError( "If `include_top` is True, you should specify `num_classes`. " f"Received: num_classes={num_classes}" ) if include_top and pooling: raise ValueError( f"`pooling` must be `None` when `include_top=True`." f"Received pooling={pooling} and include_top={include_top}. 
" ) img_input = utils.parse_model_inputs(input_shape, input_tensor) x = img_input if include_rescaling: x = layers.Rescaling(scale=1 / 255.0)(x) x = apply_vgg_block( x=x, num_layers=2, filters=64, kernel_size=(3, 3), activation="relu", padding="same", max_pool=True, name="block1", ) x = apply_vgg_block( x=x, num_layers=2, filters=128, kernel_size=(3, 3), activation="relu", padding="same", max_pool=True, name="block2", ) x = apply_vgg_block( x=x, num_layers=3, filters=256, kernel_size=(3, 3), activation="relu", padding="same", max_pool=True, name="block3", ) x = apply_vgg_block( x=x, num_layers=3, filters=512, kernel_size=(3, 3), activation="relu", padding="same", max_pool=True, name="block4", ) x = apply_vgg_block( x=x, num_layers=3, filters=512, kernel_size=(3, 3), activation="relu", padding="same", max_pool=True, name="block5", ) if include_top: x = layers.Flatten(name="flatten")(x) x = layers.Dense(4096, activation="relu", name="fc1")(x) x = layers.Dense(4096, activation="relu", name="fc2")(x) x = layers.Dense( num_classes, activation=classifier_activation, name="predictions", )(x) else: if pooling == "avg": x = layers.GlobalAveragePooling2D()(x) elif pooling == "max": x = layers.GlobalMaxPooling2D()(x) super().__init__(inputs=img_input, outputs=x, name=name, **kwargs) self.include_rescaling = include_rescaling self.include_top = include_top self.num_classes = num_classes self.input_tensor = input_tensor self.pooling = pooling self.classifier_activation = classifier_activation def get_config(self): return { "include_rescaling": self.include_rescaling, "include_top": self.include_top, "name": self.name, "input_shape": self.input_shape[1:], "input_tensor": self.input_tensor, "pooling": self.pooling, "num_classes": self.num_classes, "classifier_activation": self.classifier_activation, "trainable": self.trainable, } def apply_vgg_block( x, num_layers, filters, kernel_size, activation, padding, max_pool, name, ): """ Applies VGG block Args: x: Tensor, input tensor to pass through network num_layers: int, number of CNN layers in the block filters: int, filter size of each CNN layer in block kernel_size: int (or) tuple, kernel size for CNN layer in block activation: str (or) callable, activation function for each CNN layer in block padding: str (or) callable, padding function for each CNN layer in block max_pool: bool, whether to add MaxPooling2D layer at end of block name: str, name of the block Returns: keras.KerasTensor """ for num in range(1, num_layers + 1): x = layers.Conv2D( filters, kernel_size, activation=activation, padding=padding, name=f"{name}_conv{num}", )(x) if max_pool: x = layers.MaxPooling2D((2, 2), (2, 2), name=f"{name}_pool")(x) return x
keras-cv/keras_cv/models/backbones/vgg16/vgg16_backbone.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/vgg16/vgg16_backbone.py", "repo_id": "keras-cv", "token_count": 3392 }
21
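A short sketch of the two ways the backbone above can be used, feature extraction versus classification. The import path mirrors this file's location, and the commented output shapes assume the default 224x224 input.

```python
import numpy as np
from keras_cv.models.backbones.vgg16.vgg16_backbone import VGG16Backbone

images = np.ones((2, 224, 224, 3), dtype="float32")

# Feature-extractor mode: no fully-connected head, pooled to a vector.
feature_model = VGG16Backbone(
    include_rescaling=True,
    include_top=False,
    pooling="avg",
)
print(feature_model(images).shape)  # (2, 512)

# Classifier mode: `num_classes` becomes mandatory with `include_top=True`.
classifier = VGG16Backbone(
    include_rescaling=True,
    include_top=True,
    num_classes=10,
)
print(classifier(images).shape)  # (2, 10)
```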
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.models.feature_extractor.clip.clip_encoder import CLIPEncoder from keras_cv.models.feature_extractor.clip.clip_encoder import get_initializer class CLIPPatchingAndEmbedding(keras.layers.Layer): def __init__( self, width, patch_size, input_resolution, output_dim, **kwargs ): super().__init__(**kwargs) self.conv1 = keras.layers.Conv2D( filters=width, kernel_size=patch_size, strides=patch_size, padding="valid", use_bias=False, data_format="channels_last", kernel_initializer=get_initializer(0.02), name="patch_embed.embedding", ) self.width = width self.input_resolution = input_resolution self.patch_size = patch_size self.num_patches = ops.power( (self.input_resolution // self.patch_size), 2 ) self.class_embedding_initializer = get_initializer( ops.power(self.width, -0.5) * 0.02 ) self.output_dim = output_dim def build(self, input_shape): super().build(input_shape) self.conv1.build(input_shape) self.class_embedding = self.add_weight( shape=((self.width,)), initializer=self.class_embedding_initializer, name="patch_embed.class_embedding", ) self.positional_embedding = self.add_weight( shape=( ( (self.input_resolution // self.patch_size) ** 2 + 1, self.width, ) ), trainable=True, name="patch_embed.positional_embedding", ) def call(self, x): batch_size = ops.shape(x)[0] patch_embeddings = self.conv1(x) # shape = [*, grid, grid, channel] patch_embeddings = ops.reshape( patch_embeddings, (batch_size, self.num_patches, -1) ) class_embeds = ops.broadcast_to( self.class_embedding, (batch_size, 1, self.width) ) embeddings = ops.concatenate( [class_embeds, patch_embeddings], axis=1 ) # shape = [*, grid ** 2 + 1, width] positional_embedding = self.positional_embedding embeddings = embeddings + positional_embedding return embeddings def get_config(self): config = super().get_config() config.update( { "width": self.width, "patch_size": self.patch_size, "input_resolution": self.input_resolution, "output_dim": self.output_dim, } ) return config class CLIPImageEncoder(keras.Model): def __init__( self, input_resolution, patch_size, width, num_layers, heads, output_dim, **kwargs, ): super().__init__( **kwargs, ) self.input_resolution = input_resolution self.width = width self.patch_size = patch_size self.output_dim = output_dim self.heads = heads self.num_layers = num_layers self.embeddings = CLIPPatchingAndEmbedding( width=self.width, patch_size=self.patch_size, input_resolution=self.input_resolution, output_dim=self.output_dim, name="clip_patch_embedding", ) self.pre_norm = keras.layers.LayerNormalization( epsilon=1e-5, name="ln_1" ) self.encoder = CLIPEncoder( self.width, self.num_layers, self.heads, name="clip_encoder", ) self.post_norm = keras.layers.LayerNormalization( epsilon=1e-5, name="ln_2" ) self.image_projector = keras.layers.Dense( output_dim, name="vision_projector", use_bias=False ) def build(self, input_shape): super().build(input_shape) 
self.embeddings.build(input_shape) self.pre_norm.build([None, None, self.width]) self.encoder.build(None) self.post_norm.build([None, self.width]) self.image_projector.build([None, None, self.width]) def call(self, image): x = self.embeddings(image) x = self.pre_norm(x) x = self.encoder(x) x = self.post_norm(x[:, 0, :]) image_projected_embeddings = self.image_projector(x) return image_projected_embeddings def get_config(self): config = super().get_config() config.update( { "input_resolution": self.input_resolution, "patch_size": self.patch_size, "width": self.width, "layers": self.num_layers, "heads": self.heads, "output_dim": self.output_dim, } ) return config
keras-cv/keras_cv/models/feature_extractor/clip/clip_image_model.py/0
{ "file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_image_model.py", "repo_id": "keras-cv", "token_count": 2681 }
22
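A toy-sized sketch of running the image tower end to end. The sizes below are deliberately tiny so the example is cheap to run; they are not a published CLIP configuration, and the sketch assumes the companion `CLIPEncoder` accepts this width/heads combination.

```python
import numpy as np
from keras_cv.models.feature_extractor.clip.clip_image_model import (
    CLIPImageEncoder,
)

encoder = CLIPImageEncoder(
    input_resolution=64,  # 64 / 16 = 4 patches per side -> 16 patches
    patch_size=16,
    width=32,
    num_layers=2,
    heads=4,
    output_dim=8,
)

images = np.ones((2, 64, 64, 3), dtype="float32")
embeddings = encoder(images)
# One projected embedding per image, taken from the class token.
print(embeddings.shape)  # (2, 8)
```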
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """YOLOv8 Backbone presets.""" backbone_presets_no_weights = { "yolo_v8_xs_backbone": { "metadata": { "description": "An extra small YOLOV8 backbone", "params": 1277680, "official_name": "YOLOV8", "path": "yolo_v8", }, "kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_xs_backbone/2", }, "yolo_v8_s_backbone": { "metadata": { "description": "A small YOLOV8 backbone", "params": 5089760, "official_name": "YOLOV8", "path": "yolo_v8", }, "kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_s_backbone/2", }, "yolo_v8_m_backbone": { "metadata": { "description": "A medium YOLOV8 backbone", "params": 11872464, "official_name": "YOLOV8", "path": "yolo_v8", }, "kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_m_backbone/2", }, "yolo_v8_l_backbone": { "metadata": { "description": "A large YOLOV8 backbone", "params": 19831744, "official_name": "YOLOV8", "path": "yolo_v8", }, "kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_l_backbone/2", }, "yolo_v8_xl_backbone": { "metadata": { "description": "An extra large YOLOV8 backbone", "params": 30972080, "official_name": "YOLOV8", "path": "yolo_v8", }, "kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_xl_backbone/2", }, } backbone_presets_with_weights = { "yolo_v8_xs_backbone_coco": { "metadata": { "description": ( "An extra small YOLOV8 backbone pretrained on COCO" ), "params": 1277680, "official_name": "YOLOV8", "path": "yolo_v8", }, "kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_xs_backbone_coco/2", # noqa: E501 }, "yolo_v8_s_backbone_coco": { "metadata": { "description": ("A small YOLOV8 backbone pretrained on COCO"), "params": 5089760, "official_name": "YOLOV8", "path": "yolo_v8", }, "kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_s_backbone_coco/2", # noqa: E501 }, "yolo_v8_m_backbone_coco": { "metadata": { "description": ("A medium YOLOV8 backbone pretrained on COCO"), "params": 11872464, "official_name": "YOLOV8", "path": "yolo_v8", }, "kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_m_backbone_coco/2", # noqa: E501 }, "yolo_v8_l_backbone_coco": { "metadata": { "description": ("A large YOLOV8 backbone pretrained on COCO"), "params": 19831744, "official_name": "YOLOV8", "path": "yolo_v8", }, "kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_l_backbone_coco/2", # noqa: E501 }, "yolo_v8_xl_backbone_coco": { "metadata": { "description": ( "An extra large YOLOV8 backbone pretrained on COCO" ), "params": 30972080, "official_name": "YOLOV8", "path": "yolo_v8", }, "kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_xl_backbone_coco/2", # noqa: E501 }, } backbone_presets = { **backbone_presets_no_weights, **backbone_presets_with_weights, }
keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone_presets.py/0
{ "file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone_presets.py", "repo_id": "keras-cv", "token_count": 2152 }
23
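These dictionaries are the registry behind `from_preset`. A sketch, assuming the presets above are surfaced on `keras_cv.models.YOLOV8Backbone`; loading a `_coco` preset downloads weights from the Kaggle handles listed.

```python
import keras_cv

# Every key in `backbone_presets` is a valid name for `from_preset`.
print(list(keras_cv.models.YOLOV8Backbone.presets.keys()))
print(list(keras_cv.models.YOLOV8Backbone.presets_with_weights.keys()))

# Extra-small backbone pretrained on COCO.
backbone = keras_cv.models.YOLOV8Backbone.from_preset(
    "yolo_v8_xs_backbone_coco"
)
```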
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.models import MiTBackbone from keras_cv.models.segmentation.segformer.segformer_presets import ( # noqa: E501 presets, ) from keras_cv.models.segmentation.segformer.segformer_presets import ( # noqa: E501 presets_with_weights, ) from keras_cv.models.task import Task from keras_cv.utils.python_utils import classproperty from keras_cv.utils.train import get_feature_extractor @keras_cv_export( ["keras_cv.models.SegFormer", "keras_cv.models.segmentation.SegFormer"] ) class SegFormer(Task): """A Keras model implementing the SegFormer architecture for semantic segmentation. References: - [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) # noqa: E501 - [Based on the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/tree/main/deepvision/models/segmentation/segformer) # noqa: E501 Args: backbone: `keras.Model`. The backbone network for the model that is used as a feature extractor for the SegFormer encoder. It is *intended* to be used only with the MiT backbone model which was created specifically for SegFormers. It should either be a `keras_cv.models.backbones.backbone.Backbone` or a `tf.keras.Model` that implements the `pyramid_level_inputs` property with keys "P2", "P3", "P4", and "P5" and layer names as values. num_classes: int, the number of classes for the detection model, including the background class. projection_filters: int, number of filters in the convolution layer projecting the concatenated features into a segmentation map. Defaults to 256`. Examples: Using the class with a `backbone`: ```python import tensorflow as tf import keras_cv images = np.ones(shape=(1, 96, 96, 3)) labels = np.zeros(shape=(1, 96, 96, 1)) backbone = keras_cv.models.MiTBackbone.from_preset("mit_b0_imagenet") model = keras_cv.models.segmentation.SegFormer( num_classes=1, backbone=backbone, ) # Evaluate model model(images) # Train model model.compile( optimizer="adam", loss=keras.losses.BinaryCrossentropy(from_logits=False), metrics=["accuracy"], ) model.fit(images, labels, epochs=3) ``` """ def __init__( self, backbone, num_classes, projection_filters=256, **kwargs, ): if not isinstance(backbone, keras.layers.Layer) or not isinstance( backbone, keras.Model ): raise ValueError( "Argument `backbone` must be a `keras.layers.Layer` instance " f" or `keras.Model`. Received instead " f"backbone={backbone} (of type {type(backbone)})." 
) inputs = backbone.input feature_extractor = get_feature_extractor( backbone, list(backbone.pyramid_level_inputs.values()) ) # Multi-level dictionary features = list(feature_extractor(inputs).values()) # Get H and W of level one output _, H, W, _ = features[0].shape # Project all multi-level outputs onto the same dimensionality # and feature map shape multi_layer_outs = [] for feature_dim, feature in zip(backbone.embedding_dims, features): out = keras.layers.Dense( projection_filters, name=f"linear_{feature_dim}" )(feature) out = keras.layers.Resizing(H, W, interpolation="bilinear")(out) multi_layer_outs.append(out) # Concat now-equal feature maps concatenated_outs = keras.layers.Concatenate(axis=3)( multi_layer_outs[::-1] ) # Fuse concatenated features into a segmentation map seg = keras.Sequential( [ keras.layers.Conv2D( filters=projection_filters, kernel_size=1, use_bias=False ), keras.layers.BatchNormalization(), keras.layers.Activation("relu"), ] )(concatenated_outs) seg = keras.layers.Dropout(0.1)(seg) seg = keras.layers.Conv2D( filters=num_classes, kernel_size=1, activation="softmax" )(seg) output = keras.layers.Resizing( height=inputs.shape[1], width=inputs.shape[2], interpolation="bilinear", )(seg) super().__init__( inputs=inputs, outputs=output, **kwargs, ) self.num_classes = num_classes self.projection_filters = projection_filters self.backbone = backbone def get_config(self): config = super().get_config() config.update( { "num_classes": self.num_classes, "projection_filters": self.projection_filters, "backbone": keras.saving.serialize_keras_object(self.backbone), } ) return config @classmethod def from_preset( cls, preset, num_classes, load_weights=None, input_shape=None, **kwargs, ): aliases = { "segformer_b0": "mit_b0", "segformer_b1": "mit_b1", "segformer_b2": "mit_b2", "segformer_b3": "mit_b3", "segformer_b4": "mit_b4", "segformer_b5": "mit_b5", } if preset in aliases: preset = aliases[preset] return super().from_preset( preset, load_weights=load_weights, num_classes=num_classes, input_shape=input_shape, **kwargs, ) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return copy.deepcopy(presets) @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return copy.deepcopy(presets_with_weights) @classproperty def backbone_presets(cls): return copy.deepcopy(MiTBackbone.presets)
keras-cv/keras_cv/models/segmentation/segformer/segformer.py/0
{ "file_path": "keras-cv/keras_cv/models/segmentation/segformer/segformer.py", "repo_id": "keras-cv", "token_count": 3095 }
24
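The `from_preset` override above simply maps the `segformer_b*` aliases onto the corresponding MiT backbone presets. A sketch of that path, assuming the base `Task.from_preset` accepts the `input_shape` and `load_weights` arguments it is handed here; `load_weights=False` skips downloading pretrained weights.

```python
import numpy as np
import keras_cv

# "segformer_b0" is rewritten to the "mit_b0" backbone preset internally.
model = keras_cv.models.SegFormer.from_preset(
    "segformer_b0",
    num_classes=2,
    input_shape=(96, 96, 3),
    load_weights=False,
)

images = np.ones((1, 96, 96, 3), dtype="float32")
masks = model(images)
print(masks.shape)  # (1, 96, 96, 2): per-pixel softmax over the classes
```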
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.stable_diffusion.padded_conv2d import PaddedConv2D


class AttentionBlock(keras.layers.Layer):
    def __init__(self, output_dim, **kwargs):
        super().__init__(**kwargs)
        self.output_dim = output_dim
        self.norm = keras.layers.GroupNormalization(epsilon=1e-5)
        self.q = PaddedConv2D(output_dim, 1)
        self.k = PaddedConv2D(output_dim, 1)
        self.v = PaddedConv2D(output_dim, 1)
        self.proj_out = PaddedConv2D(output_dim, 1)

    def call(self, inputs):
        x = self.norm(inputs)
        q, k, v = self.q(x), self.k(x), self.v(x)

        # Compute attention
        shape = ops.shape(q)
        h, w, c = shape[1], shape[2], shape[3]
        q = ops.reshape(q, (-1, h * w, c))  # b, hw, c
        k = ops.transpose(k, (0, 3, 1, 2))
        k = ops.reshape(k, (-1, c, h * w))  # b, c, hw
        y = q @ k
        y = y * 1 / ops.sqrt(ops.cast(c, self.compute_dtype))
        y = keras.activations.softmax(y)

        # Attend to values
        v = ops.transpose(v, (0, 3, 1, 2))
        v = ops.reshape(v, (-1, c, h * w))
        y = ops.transpose(y, (0, 2, 1))
        x = v @ y
        x = ops.transpose(x, (0, 2, 1))
        x = ops.reshape(x, (-1, h, w, c))
        return self.proj_out(x) + inputs
keras-cv/keras_cv/models/stable_diffusion/attention_block.py/0
{ "file_path": "keras-cv/keras_cv/models/stable_diffusion/attention_block.py", "repo_id": "keras-cv", "token_count": 855 }
25
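A small sketch of the block in isolation. Because the projection output is added back onto the input, `output_dim` must match the channel count of the incoming feature map, and the default `GroupNormalization` uses 32 groups, so the channel count also needs to be divisible by 32.

```python
import numpy as np
from keras_cv.models.stable_diffusion.attention_block import AttentionBlock

block = AttentionBlock(output_dim=64)

# An 8x8 feature map with 64 channels; the residual connection keeps the
# output shape identical to the input shape.
feature_map = np.random.uniform(size=(1, 8, 8, 64)).astype("float32")
print(block(feature_map).shape)  # (1, 8, 8, 64)
```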
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""IoU3D using a custom TF op."""

from keras_cv.utils.resource_loader import LazySO

keras_cv_custom_ops = LazySO("custom_ops/_keras_cv_custom_ops.so")


def iou_3d(y_true, y_pred):
    """Implements IoU computation for 3D upright rotated bounding boxes.

    Note that this is implemented using a custom TensorFlow op. If you don't
    have KerasCV installed with custom ops, calling this will fail.

    Boxes should have the format CENTER_XYZ_DXDYDZ_PHI. Refer to
    https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
    for more details on supported bounding box formats.

    Sample Usage:
    ```python
    y_true = [[0, 0, 0, 2, 2, 2, 0], [1, 1, 1, 2, 2, 2, 3 * math.pi / 4]]
    y_pred = [[1, 1, 1, 2, 2, 2, math.pi / 4], [1, 1, 1, 2, 2, 2, 0]]
    iou_3d(y_true, y_pred)
    ```
    """
    return keras_cv_custom_ops.ops.kcv_pairwise_iou3d(y_true, y_pred)
keras-cv/keras_cv/ops/iou_3d.py/0
{ "file_path": "keras-cv/keras_cv/ops/iou_3d.py", "repo_id": "keras-cv", "token_count": 543 }
26
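For axis-aligned boxes the expected value is easy to compute by hand, which makes a handy sanity check for the custom op. A sketch using the module path above, assuming KerasCV was installed with its custom ops compiled.

```python
from keras_cv.ops.iou_3d import iou_3d

# Two 2x2x2 boxes in CENTER_XYZ_DXDYDZ_PHI format, both upright (phi = 0).
y_true = [[0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 0.0]]
y_pred = [[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0]]

# The boxes overlap in a 1x1x1 cube, so
# IoU = 1 / (8 + 8 - 1) = 1/15 ≈ 0.0667.
print(iou_3d(y_true, y_pred))
```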
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import optimizers

from keras_cv.losses import SimCLRLoss
from keras_cv.models import ResNet50V2Backbone
from keras_cv.tests.test_case import TestCase
from keras_cv.training import SimCLRAugmenter
from keras_cv.training import SimCLRTrainer


# TODO(jbischof): revisit "extra_large" tag once development resumes.
# These tests are currently some of the slowest in our repo.
@pytest.mark.extra_large
class SimCLRTrainerTest(TestCase):
    def test_train_without_probing(self):
        simclr_without_probing = SimCLRTrainer(
            self.build_encoder(),
            augmenter=SimCLRAugmenter(value_range=(0, 255)),
        )

        images = tf.random.uniform((10, 512, 512, 3))

        simclr_without_probing.compile(
            encoder_optimizer=optimizers.Adam(),
            encoder_loss=SimCLRLoss(temperature=0.5),
        )
        simclr_without_probing.fit(images)

    def build_encoder(self):
        return keras.Sequential(
            [
                ResNet50V2Backbone(include_rescaling=False),
                layers.GlobalAveragePooling2D(name="avg_pool"),
            ]
        )
keras-cv/keras_cv/training/contrastive/simclr_trainer_test.py/0
{ "file_path": "keras-cv/keras_cv/training/contrastive/simclr_trainer_test.py", "repo_id": "keras-cv", "token_count": 676 }
27
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.backend import keras def scale_loss_for_distribution(loss_value): """Scales and returns the given loss value by the number of replicas.""" num_replicas = tf.distribute.get_strategy().num_replicas_in_sync if num_replicas > 1: loss_value *= 1.0 / num_replicas return loss_value def convert_inputs_to_tf_dataset( x=None, y=None, sample_weight=None, batch_size=None ): if sample_weight is not None: raise ValueError( "Contrastive trainers do not yet support `sample_weight`." ) if isinstance(x, tf.data.Dataset): if y is not None or batch_size is not None: raise ValueError( "When `x` is a `tf.data.Dataset`, please do not " "provide a value for `y` or `batch_size`. " "Got `y={y}`, `batch_size={batch_size}`." ) return x # batch_size defaults to 32, as it does in fit(). batch_size = batch_size or 32 # Parse inputs inputs = x if y is not None: inputs = (x, y) # Construct tf.data.Dataset dataset = tf.data.Dataset.from_tensor_slices(inputs) if batch_size is not None: dataset = dataset.batch(batch_size) return dataset def get_feature_extractor(model, layer_names, output_keys=None): """Create a feature extractor model with augmented output. This method produces a new `keras.Model` with the same input signature as the source but with the layers in `layer_names` as the output. This is useful for downstream tasks that require more output than the final layer of the backbone. Args: model: keras.Model. The source model. layer_names: list of strings. Names of layers to include in the output signature. output_keys: optional, list of strings. Key to use for each layer in the model's output dictionary. Returns: `keras.Model` which has dict as outputs. """ if not output_keys: output_keys = layer_names items = zip(output_keys, layer_names) outputs = {key: model.get_layer(name).output for key, name in items} return keras.Model(inputs=model.inputs, outputs=outputs)
keras-cv/keras_cv/utils/train.py/0
{ "file_path": "keras-cv/keras_cv/utils/train.py", "repo_id": "keras-cv", "token_count": 1036 }
28
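A sketch of `get_feature_extractor` in practice: re-keying a backbone's pyramid outputs by level name, which is the dictionary shape the detection models in this repository expect. Note that the preset call downloads ImageNet weights.

```python
import numpy as np
import keras_cv
from keras_cv.utils.train import get_feature_extractor

backbone = keras_cv.models.ResNet50Backbone.from_preset("resnet50_imagenet")

levels = ["P3", "P4", "P5"]
layer_names = [backbone.pyramid_level_inputs[level] for level in levels]
extractor = get_feature_extractor(backbone, layer_names, output_keys=levels)

# The extractor returns a dict of feature maps keyed by pyramid level.
features = extractor(np.ones((1, 512, 512, 3), dtype="float32"))
for level, feature_map in features.items():
    print(level, feature_map.shape)
```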
[metadata]
license_files = LICENSE
description_file = README.md
version = attr: keras_cv.__version__

[tool:pytest]
filterwarnings =
    error
    ignore::DeprecationWarning
    ignore::ImportWarning
    ignore::RuntimeWarning
    ignore::PendingDeprecationWarning
    ignore::FutureWarning

[flake8]
max-line-length = 80
per-file-ignores =
    ./keras_cv/__init__.py:E402, F401
    ./examples/**/*:E402
    **/__init__.py:F401
ignore =
    # Conflicts with black
    E203
    # defaults flake8 ignores
    E121,E123,E126,E226,E24,E704,W503,W504
    # Function name should be lowercase
    N802
    # lowercase ... imported as non lowercase
    # Useful to ignore for "import keras.backend as K"
    N812
    # do not use bare 'except'
    E722
    # Escape characters check.
    # Conflict with pytest error message regex.
    W605
    # Ignore for tf.cond lambda
    E731
keras-cv/setup.cfg/0
{ "file_path": "keras-cv/setup.cfg", "repo_id": "keras-cv", "token_count": 343 }
29
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ # Setup/utils """ import time import matplotlib.pyplot as plt import tensorflow as tf import tensorflow.keras.layers as layers from tensorflow import keras from tensorflow.keras import backend from keras_cv.utils import bounding_box from keras_cv.utils import fill_utils def single_rectangle_mask(corners, mask_shape): """Computes masks of rectangles Args: corners: tensor of rectangle coordinates with shape (batch_size, 4) in corners format (x0, y0, x1, y1). mask_shape: a shape tuple as (width, height) indicating the output width and height of masks. Returns: boolean masks with shape (batch_size, width, height) where True values indicate positions within rectangle coordinates. """ # add broadcasting axes corners = corners[..., tf.newaxis, tf.newaxis] # split coordinates x0 = corners[0] y0 = corners[1] x1 = corners[2] y1 = corners[3] # repeat height and width width, height = mask_shape x0_rep = tf.repeat(x0, height, axis=0) y0_rep = tf.repeat(y0, width, axis=1) x1_rep = tf.repeat(x1, height, axis=0) y1_rep = tf.repeat(y1, width, axis=1) # range grid range_row = tf.range(0, height, dtype=corners.dtype) range_col = tf.range(0, width, dtype=corners.dtype) range_row = range_row[:, tf.newaxis] range_col = range_col[tf.newaxis, :] # boolean masks mask_x0 = tf.less_equal(x0_rep, range_col) mask_y0 = tf.less_equal(y0_rep, range_row) mask_x1 = tf.less(range_col, x1_rep) mask_y1 = tf.less(range_row, y1_rep) masks = mask_x0 & mask_y0 & mask_x1 & mask_y1 return masks def fill_single_rectangle( image, centers_x, centers_y, widths, heights, fill_values ): """Fill rectangles with fill value into images. Args: image: Tensor of images to fill rectangles into. centers_x: Tensor of positions of the rectangle centers on the x-axis. centers_y: Tensor of positions of the rectangle centers on the y-axis. widths: Tensor of widths of the rectangles heights: Tensor of heights of the rectangles fill_values: Tensor with same shape as images to get rectangle fill from. Returns: images with filled rectangles. 
""" images_shape = tf.shape(image) images_height = images_shape[0] images_width = images_shape[1] xywh = tf.stack([centers_x, centers_y, widths, heights], axis=0) xywh = tf.cast(xywh, tf.float32) corners = bounding_box.convert_to_corners(xywh, format="coco") mask_shape = (images_width, images_height) is_rectangle = single_rectangle_mask(corners, mask_shape) is_rectangle = tf.expand_dims(is_rectangle, -1) images = tf.where(is_rectangle, fill_values, image) return images """ # Layer Implementations ## Fully Vectorized """ class VectorizedRandomCutout(layers.Layer): def __init__( self, height_factor, width_factor, fill_mode="constant", fill_value=0.0, seed=None, **kwargs, ): super().__init__(**kwargs) self.height_lower, self.height_upper = self._parse_bounds(height_factor) self.width_lower, self.width_upper = self._parse_bounds(width_factor) if fill_mode not in ["gaussian_noise", "constant"]: raise ValueError( '`fill_mode` should be "gaussian_noise" ' f'or "constant". Got `fill_mode`={fill_mode}' ) if not isinstance(self.height_lower, type(self.height_upper)): raise ValueError( "`height_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.height_lower), type(self.height_upper) ) ) if not isinstance(self.width_lower, type(self.width_upper)): raise ValueError( "`width_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.width_lower), type(self.width_upper) ) ) if self.height_upper < self.height_lower: raise ValueError( "`height_factor` cannot have upper bound less than " "lower bound, got {}".format(height_factor) ) self._height_is_float = isinstance(self.height_lower, float) if self._height_is_float: if not self.height_lower >= 0.0 or not self.height_upper <= 1.0: raise ValueError( "`height_factor` must have values between [0, 1] " "when is float, got {}".format(height_factor) ) if self.width_upper < self.width_lower: raise ValueError( "`width_factor` cannot have upper bound less than " "lower bound, got {}".format(width_factor) ) self._width_is_float = isinstance(self.width_lower, float) if self._width_is_float: if not self.width_lower >= 0.0 or not self.width_upper <= 1.0: raise ValueError( "`width_factor` must have values between [0, 1] " "when is float, got {}".format(width_factor) ) self.fill_mode = fill_mode self.fill_value = fill_value self.seed = seed def _parse_bounds(self, factor): if isinstance(factor, (tuple, list)): return factor[0], factor[1] else: return type(factor)(0), factor @tf.function(jit_compile=True) def call(self, inputs, training=True): if training is None: training = backend.learning_phase() augment = lambda: self._random_cutout(inputs) no_augment = lambda: inputs return tf.cond(tf.cast(training, tf.bool), augment, no_augment) def _random_cutout(self, inputs): """Apply random cutout.""" center_x, center_y = self._compute_rectangle_position(inputs) rectangle_height, rectangle_width = self._compute_rectangle_size(inputs) rectangle_fill = self._compute_rectangle_fill(inputs) inputs = fill_utils.fill_rectangle( inputs, center_x, center_y, rectangle_width, rectangle_height, rectangle_fill, ) return inputs def _compute_rectangle_position(self, inputs): input_shape = tf.shape(inputs) batch_size, image_height, image_width = ( input_shape[0], input_shape[1], input_shape[2], ) center_x = tf.random.uniform( shape=[batch_size], minval=0, maxval=image_width, dtype=tf.int32, seed=self.seed, ) center_y = tf.random.uniform( shape=[batch_size], minval=0, maxval=image_height, dtype=tf.int32, 
seed=self.seed, ) return center_x, center_y def _compute_rectangle_size(self, inputs): input_shape = tf.shape(inputs) batch_size, image_height, image_width = ( input_shape[0], input_shape[1], input_shape[2], ) height = tf.random.uniform( [batch_size], minval=self.height_lower, maxval=self.height_upper, dtype=tf.float32, ) width = tf.random.uniform( [batch_size], minval=self.width_lower, maxval=self.width_upper, dtype=tf.float32, ) if self._height_is_float: height = height * tf.cast(image_height, tf.float32) if self._width_is_float: width = width * tf.cast(image_width, tf.float32) height = tf.cast(tf.math.ceil(height), tf.int32) width = tf.cast(tf.math.ceil(width), tf.int32) height = tf.minimum(height, image_height) width = tf.minimum(width, image_width) return height, width def _compute_rectangle_fill(self, inputs): input_shape = tf.shape(inputs) if self.fill_mode == "constant": fill_value = tf.fill(input_shape, self.fill_value) else: # gaussian noise fill_value = tf.random.normal(input_shape) return fill_value def get_config(self): config = { "height_factor": self.height_factor, "width_factor": self.width_factor, "fill_mode": self.fill_mode, "fill_value": self.fill_value, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) """ ## tf.map_fn """ class MapFnRandomCutout(layers.Layer): def __init__( self, height_factor, width_factor, fill_mode="constant", fill_value=0.0, seed=None, **kwargs, ): super().__init__(**kwargs) self.height_lower, self.height_upper = self._parse_bounds(height_factor) self.width_lower, self.width_upper = self._parse_bounds(width_factor) if fill_mode not in ["gaussian_noise", "constant"]: raise ValueError( '`fill_mode` should be "gaussian_noise" ' f'or "constant". 
Got `fill_mode`={fill_mode}' ) if not isinstance(self.height_lower, type(self.height_upper)): raise ValueError( "`height_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.height_lower), type(self.height_upper) ) ) if not isinstance(self.width_lower, type(self.width_upper)): raise ValueError( "`width_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.width_lower), type(self.width_upper) ) ) if self.height_upper < self.height_lower: raise ValueError( "`height_factor` cannot have upper bound less than " "lower bound, got {}".format(height_factor) ) self._height_is_float = isinstance(self.height_lower, float) if self._height_is_float: if not self.height_lower >= 0.0 or not self.height_upper <= 1.0: raise ValueError( "`height_factor` must have values between [0, 1] " "when is float, got {}".format(height_factor) ) if self.width_upper < self.width_lower: raise ValueError( "`width_factor` cannot have upper bound less than " "lower bound, got {}".format(width_factor) ) self._width_is_float = isinstance(self.width_lower, float) if self._width_is_float: if not self.width_lower >= 0.0 or not self.width_upper <= 1.0: raise ValueError( "`width_factor` must have values between [0, 1] " "when is float, got {}".format(width_factor) ) self.fill_mode = fill_mode self.fill_value = fill_value self.seed = seed def _parse_bounds(self, factor): if isinstance(factor, (tuple, list)): return factor[0], factor[1] else: return type(factor)(0), factor @tf.function(jit_compile=True) def call(self, inputs, training=True): augment = lambda: tf.map_fn(self._random_cutout, inputs) no_augment = lambda: inputs return tf.cond(tf.cast(training, tf.bool), augment, no_augment) def _random_cutout(self, input): center_x, center_y = self._compute_rectangle_position(input) rectangle_height, rectangle_width = self._compute_rectangle_size(input) rectangle_fill = self._compute_rectangle_fill(input) input = fill_single_rectangle( input, center_x, center_y, rectangle_width, rectangle_height, rectangle_fill, ) return input def _compute_rectangle_position(self, inputs): input_shape = tf.shape(inputs) image_height, image_width = ( input_shape[0], input_shape[1], ) center_x = tf.random.uniform( shape=[], minval=0, maxval=image_width, dtype=tf.int32, seed=self.seed, ) center_y = tf.random.uniform( shape=[], minval=0, maxval=image_height, dtype=tf.int32, seed=self.seed, ) return center_x, center_y def _compute_rectangle_size(self, inputs): input_shape = tf.shape(inputs) image_height, image_width = ( input_shape[0], input_shape[1], ) height = tf.random.uniform( [], minval=self.height_lower, maxval=self.height_upper, dtype=tf.float32, ) width = tf.random.uniform( [], minval=self.width_lower, maxval=self.width_upper, dtype=tf.float32, ) if self._height_is_float: height = height * tf.cast(image_height, tf.float32) if self._width_is_float: width = width * tf.cast(image_width, tf.float32) height = tf.cast(tf.math.ceil(height), tf.int32) width = tf.cast(tf.math.ceil(width), tf.int32) height = tf.minimum(height, image_height) width = tf.minimum(width, image_width) return height, width def _compute_rectangle_fill(self, inputs): input_shape = tf.shape(inputs) if self.fill_mode == "constant": fill_value = tf.fill(input_shape, self.fill_value) else: # gaussian noise fill_value = tf.random.normal(input_shape) return fill_value def get_config(self): config = { "height_factor": self.height_factor, "width_factor": self.width_factor, "fill_mode": self.fill_mode, 
"fill_value": self.fill_value, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) """ ## tf.vectorized_map """ class VMapRandomCutout(layers.Layer): def __init__( self, height_factor, width_factor, fill_mode="constant", fill_value=0.0, seed=None, **kwargs, ): super().__init__(**kwargs) self.height_lower, self.height_upper = self._parse_bounds(height_factor) self.width_lower, self.width_upper = self._parse_bounds(width_factor) if fill_mode not in ["gaussian_noise", "constant"]: raise ValueError( '`fill_mode` should be "gaussian_noise" ' f'or "constant". Got `fill_mode`={fill_mode}' ) if not isinstance(self.height_lower, type(self.height_upper)): raise ValueError( "`height_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.height_lower), type(self.height_upper) ) ) if not isinstance(self.width_lower, type(self.width_upper)): raise ValueError( "`width_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.width_lower), type(self.width_upper) ) ) if self.height_upper < self.height_lower: raise ValueError( "`height_factor` cannot have upper bound less than " "lower bound, got {}".format(height_factor) ) self._height_is_float = isinstance(self.height_lower, float) if self._height_is_float: if not self.height_lower >= 0.0 or not self.height_upper <= 1.0: raise ValueError( "`height_factor` must have values between [0, 1] " "when is float, got {}".format(height_factor) ) if self.width_upper < self.width_lower: raise ValueError( "`width_factor` cannot have upper bound less than " "lower bound, got {}".format(width_factor) ) self._width_is_float = isinstance(self.width_lower, float) if self._width_is_float: if not self.width_lower >= 0.0 or not self.width_upper <= 1.0: raise ValueError( "`width_factor` must have values between [0, 1] " "when is float, got {}".format(width_factor) ) self.fill_mode = fill_mode self.fill_value = fill_value self.seed = seed def _parse_bounds(self, factor): if isinstance(factor, (tuple, list)): return factor[0], factor[1] else: return type(factor)(0), factor @tf.function(jit_compile=True) def call(self, inputs, training=True): augment = lambda: tf.vectorized_map(self._random_cutout, inputs) no_augment = lambda: inputs return tf.cond(tf.cast(training, tf.bool), augment, no_augment) def _random_cutout(self, input): center_x, center_y = self._compute_rectangle_position(input) rectangle_height, rectangle_width = self._compute_rectangle_size(input) rectangle_fill = self._compute_rectangle_fill(input) input = fill_single_rectangle( input, center_x, center_y, rectangle_width, rectangle_height, rectangle_fill, ) return input def _compute_rectangle_position(self, inputs): input_shape = tf.shape(inputs) image_height, image_width = ( input_shape[0], input_shape[1], ) center_x = tf.random.uniform( shape=[], minval=0, maxval=image_width, dtype=tf.int32, seed=self.seed, ) center_y = tf.random.uniform( shape=[], minval=0, maxval=image_height, dtype=tf.int32, seed=self.seed, ) return center_x, center_y def _compute_rectangle_size(self, inputs): input_shape = tf.shape(inputs) image_height, image_width = ( input_shape[0], input_shape[1], ) height = tf.random.uniform( [], minval=self.height_lower, maxval=self.height_upper, dtype=tf.float32, ) width = tf.random.uniform( [], minval=self.width_lower, maxval=self.width_upper, dtype=tf.float32, ) if self._height_is_float: height = height * tf.cast(image_height, tf.float32) if 
self._width_is_float: width = width * tf.cast(image_width, tf.float32) height = tf.cast(tf.math.ceil(height), tf.int32) width = tf.cast(tf.math.ceil(width), tf.int32) height = tf.minimum(height, image_height) width = tf.minimum(width, image_width) return height, width def _compute_rectangle_fill(self, inputs): input_shape = tf.shape(inputs) if self.fill_mode == "constant": fill_value = tf.fill(input_shape, self.fill_value) else: # gaussian noise fill_value = tf.random.normal(input_shape) return fill_value def get_config(self): config = { "height_factor": self.height_factor, "width_factor": self.width_factor, "fill_mode": self.fill_mode, "fill_value": self.fill_value, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) """ JIT COMPILED # Layer Implementations ## Fully Vectorized """ class JITVectorizedRandomCutout(layers.Layer): def __init__( self, height_factor, width_factor, fill_mode="constant", fill_value=0.0, seed=None, **kwargs, ): super().__init__(**kwargs) self.height_lower, self.height_upper = self._parse_bounds(height_factor) self.width_lower, self.width_upper = self._parse_bounds(width_factor) if fill_mode not in ["gaussian_noise", "constant"]: raise ValueError( '`fill_mode` should be "gaussian_noise" ' f'or "constant". Got `fill_mode`={fill_mode}' ) if not isinstance(self.height_lower, type(self.height_upper)): raise ValueError( "`height_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.height_lower), type(self.height_upper) ) ) if not isinstance(self.width_lower, type(self.width_upper)): raise ValueError( "`width_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.width_lower), type(self.width_upper) ) ) if self.height_upper < self.height_lower: raise ValueError( "`height_factor` cannot have upper bound less than " "lower bound, got {}".format(height_factor) ) self._height_is_float = isinstance(self.height_lower, float) if self._height_is_float: if not self.height_lower >= 0.0 or not self.height_upper <= 1.0: raise ValueError( "`height_factor` must have values between [0, 1] " "when is float, got {}".format(height_factor) ) if self.width_upper < self.width_lower: raise ValueError( "`width_factor` cannot have upper bound less than " "lower bound, got {}".format(width_factor) ) self._width_is_float = isinstance(self.width_lower, float) if self._width_is_float: if not self.width_lower >= 0.0 or not self.width_upper <= 1.0: raise ValueError( "`width_factor` must have values between [0, 1] " "when is float, got {}".format(width_factor) ) self.fill_mode = fill_mode self.fill_value = fill_value self.seed = seed def _parse_bounds(self, factor): if isinstance(factor, (tuple, list)): return factor[0], factor[1] else: return type(factor)(0), factor @tf.function(jit_compile=True) def call(self, inputs, training=True): if training is None: training = backend.learning_phase() augment = lambda: self._random_cutout(inputs) no_augment = lambda: inputs return tf.cond(tf.cast(training, tf.bool), augment, no_augment) def _random_cutout(self, inputs): """Apply random cutout.""" center_x, center_y = self._compute_rectangle_position(inputs) rectangle_height, rectangle_width = self._compute_rectangle_size(inputs) rectangle_fill = self._compute_rectangle_fill(inputs) inputs = fill_utils.fill_rectangle( inputs, center_x, center_y, rectangle_width, rectangle_height, rectangle_fill, ) return inputs def _compute_rectangle_position(self, inputs): 
input_shape = tf.shape(inputs) batch_size, image_height, image_width = ( input_shape[0], input_shape[1], input_shape[2], ) center_x = tf.random.uniform( shape=[batch_size], minval=0, maxval=image_width, dtype=tf.int32, seed=self.seed, ) center_y = tf.random.uniform( shape=[batch_size], minval=0, maxval=image_height, dtype=tf.int32, seed=self.seed, ) return center_x, center_y def _compute_rectangle_size(self, inputs): input_shape = tf.shape(inputs) batch_size, image_height, image_width = ( input_shape[0], input_shape[1], input_shape[2], ) height = tf.random.uniform( [batch_size], minval=self.height_lower, maxval=self.height_upper, dtype=tf.float32, ) width = tf.random.uniform( [batch_size], minval=self.width_lower, maxval=self.width_upper, dtype=tf.float32, ) if self._height_is_float: height = height * tf.cast(image_height, tf.float32) if self._width_is_float: width = width * tf.cast(image_width, tf.float32) height = tf.cast(tf.math.ceil(height), tf.int32) width = tf.cast(tf.math.ceil(width), tf.int32) height = tf.minimum(height, image_height) width = tf.minimum(width, image_width) return height, width def _compute_rectangle_fill(self, inputs): input_shape = tf.shape(inputs) if self.fill_mode == "constant": fill_value = tf.fill(input_shape, self.fill_value) else: # gaussian noise fill_value = tf.random.normal(input_shape) return fill_value def get_config(self): config = { "height_factor": self.height_factor, "width_factor": self.width_factor, "fill_mode": self.fill_mode, "fill_value": self.fill_value, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) """ ## tf.map_fn """ class JITMapFnRandomCutout(layers.Layer): def __init__( self, height_factor, width_factor, fill_mode="constant", fill_value=0.0, seed=None, **kwargs, ): super().__init__(**kwargs) self.height_lower, self.height_upper = self._parse_bounds(height_factor) self.width_lower, self.width_upper = self._parse_bounds(width_factor) if fill_mode not in ["gaussian_noise", "constant"]: raise ValueError( '`fill_mode` should be "gaussian_noise" ' f'or "constant". 
Got `fill_mode`={fill_mode}' ) if not isinstance(self.height_lower, type(self.height_upper)): raise ValueError( "`height_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.height_lower), type(self.height_upper) ) ) if not isinstance(self.width_lower, type(self.width_upper)): raise ValueError( "`width_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.width_lower), type(self.width_upper) ) ) if self.height_upper < self.height_lower: raise ValueError( "`height_factor` cannot have upper bound less than " "lower bound, got {}".format(height_factor) ) self._height_is_float = isinstance(self.height_lower, float) if self._height_is_float: if not self.height_lower >= 0.0 or not self.height_upper <= 1.0: raise ValueError( "`height_factor` must have values between [0, 1] " "when is float, got {}".format(height_factor) ) if self.width_upper < self.width_lower: raise ValueError( "`width_factor` cannot have upper bound less than " "lower bound, got {}".format(width_factor) ) self._width_is_float = isinstance(self.width_lower, float) if self._width_is_float: if not self.width_lower >= 0.0 or not self.width_upper <= 1.0: raise ValueError( "`width_factor` must have values between [0, 1] " "when is float, got {}".format(width_factor) ) self.fill_mode = fill_mode self.fill_value = fill_value self.seed = seed def _parse_bounds(self, factor): if isinstance(factor, (tuple, list)): return factor[0], factor[1] else: return type(factor)(0), factor @tf.function(jit_compile=True) def call(self, inputs, training=True): augment = lambda: tf.map_fn(self._random_cutout, inputs) no_augment = lambda: inputs return tf.cond(tf.cast(training, tf.bool), augment, no_augment) def _random_cutout(self, input): center_x, center_y = self._compute_rectangle_position(input) rectangle_height, rectangle_width = self._compute_rectangle_size(input) rectangle_fill = self._compute_rectangle_fill(input) input = fill_single_rectangle( input, center_x, center_y, rectangle_width, rectangle_height, rectangle_fill, ) return input def _compute_rectangle_position(self, inputs): input_shape = tf.shape(inputs) image_height, image_width = ( input_shape[0], input_shape[1], ) center_x = tf.random.uniform( shape=[], minval=0, maxval=image_width, dtype=tf.int32, seed=self.seed, ) center_y = tf.random.uniform( shape=[], minval=0, maxval=image_height, dtype=tf.int32, seed=self.seed, ) return center_x, center_y def _compute_rectangle_size(self, inputs): input_shape = tf.shape(inputs) image_height, image_width = ( input_shape[0], input_shape[1], ) height = tf.random.uniform( [], minval=self.height_lower, maxval=self.height_upper, dtype=tf.float32, ) width = tf.random.uniform( [], minval=self.width_lower, maxval=self.width_upper, dtype=tf.float32, ) if self._height_is_float: height = height * tf.cast(image_height, tf.float32) if self._width_is_float: width = width * tf.cast(image_width, tf.float32) height = tf.cast(tf.math.ceil(height), tf.int32) width = tf.cast(tf.math.ceil(width), tf.int32) height = tf.minimum(height, image_height) width = tf.minimum(width, image_width) return height, width def _compute_rectangle_fill(self, inputs): input_shape = tf.shape(inputs) if self.fill_mode == "constant": fill_value = tf.fill(input_shape, self.fill_value) else: # gaussian noise fill_value = tf.random.normal(input_shape) return fill_value def get_config(self): config = { "height_factor": self.height_factor, "width_factor": self.width_factor, "fill_mode": self.fill_mode, 
"fill_value": self.fill_value, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) """ ## tf.vectorized_map """ class JITVMapRandomCutout(layers.Layer): def __init__( self, height_factor, width_factor, fill_mode="constant", fill_value=0.0, seed=None, **kwargs, ): super().__init__(**kwargs) self.height_lower, self.height_upper = self._parse_bounds(height_factor) self.width_lower, self.width_upper = self._parse_bounds(width_factor) if fill_mode not in ["gaussian_noise", "constant"]: raise ValueError( '`fill_mode` should be "gaussian_noise" ' f'or "constant". Got `fill_mode`={fill_mode}' ) if not isinstance(self.height_lower, type(self.height_upper)): raise ValueError( "`height_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.height_lower), type(self.height_upper) ) ) if not isinstance(self.width_lower, type(self.width_upper)): raise ValueError( "`width_factor` must have lower bound and upper bound " "with same type, got {} and {}".format( type(self.width_lower), type(self.width_upper) ) ) if self.height_upper < self.height_lower: raise ValueError( "`height_factor` cannot have upper bound less than " "lower bound, got {}".format(height_factor) ) self._height_is_float = isinstance(self.height_lower, float) if self._height_is_float: if not self.height_lower >= 0.0 or not self.height_upper <= 1.0: raise ValueError( "`height_factor` must have values between [0, 1] " "when is float, got {}".format(height_factor) ) if self.width_upper < self.width_lower: raise ValueError( "`width_factor` cannot have upper bound less than " "lower bound, got {}".format(width_factor) ) self._width_is_float = isinstance(self.width_lower, float) if self._width_is_float: if not self.width_lower >= 0.0 or not self.width_upper <= 1.0: raise ValueError( "`width_factor` must have values between [0, 1] " "when is float, got {}".format(width_factor) ) self.fill_mode = fill_mode self.fill_value = fill_value self.seed = seed def _parse_bounds(self, factor): if isinstance(factor, (tuple, list)): return factor[0], factor[1] else: return type(factor)(0), factor @tf.function(jit_compile=True) def call(self, inputs, training=True): augment = lambda: tf.vectorized_map(self._random_cutout, inputs) no_augment = lambda: inputs return tf.cond(tf.cast(training, tf.bool), augment, no_augment) def _random_cutout(self, input): center_x, center_y = self._compute_rectangle_position(input) rectangle_height, rectangle_width = self._compute_rectangle_size(input) rectangle_fill = self._compute_rectangle_fill(input) input = fill_single_rectangle( input, center_x, center_y, rectangle_width, rectangle_height, rectangle_fill, ) return input def _compute_rectangle_position(self, inputs): input_shape = tf.shape(inputs) image_height, image_width = ( input_shape[0], input_shape[1], ) center_x = tf.random.uniform( shape=[], minval=0, maxval=image_width, dtype=tf.int32, seed=self.seed, ) center_y = tf.random.uniform( shape=[], minval=0, maxval=image_height, dtype=tf.int32, seed=self.seed, ) return center_x, center_y def _compute_rectangle_size(self, inputs): input_shape = tf.shape(inputs) image_height, image_width = ( input_shape[0], input_shape[1], ) height = tf.random.uniform( [], minval=self.height_lower, maxval=self.height_upper, dtype=tf.float32, ) width = tf.random.uniform( [], minval=self.width_lower, maxval=self.width_upper, dtype=tf.float32, ) if self._height_is_float: height = height * tf.cast(image_height, tf.float32) if 
self._width_is_float: width = width * tf.cast(image_width, tf.float32) height = tf.cast(tf.math.ceil(height), tf.int32) width = tf.cast(tf.math.ceil(width), tf.int32) height = tf.minimum(height, image_height) width = tf.minimum(width, image_width) return height, width def _compute_rectangle_fill(self, inputs): input_shape = tf.shape(inputs) if self.fill_mode == "constant": fill_value = tf.fill(input_shape, self.fill_value) else: # gaussian noise fill_value = tf.random.normal(input_shape) return fill_value def get_config(self): config = { "height_factor": self.height_factor, "width_factor": self.width_factor, "fill_mode": self.fill_mode, "fill_value": self.fill_value, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) """ # Benchmarking """ (x_train, _), _ = keras.datasets.cifar10.load_data() x_train = x_train.astype(float) x_train.shape images = [] num_images = [1000, 2000, 5000, 10000, 25000, 37500, 50000] results = {} for aug in [ VectorizedRandomCutout, VMapRandomCutout, MapFnRandomCutout, JITVectorizedRandomCutout, JITVMapRandomCutout, JITMapFnRandomCutout, ]: c = aug.__name__ layer = aug(0.2, 0.2) runtimes = [] print(f"Timing {c}") for n_images in num_images: # warmup layer(x_train[:n_images]) t0 = time.time() r1 = layer(x_train[:n_images]) t1 = time.time() runtimes.append(t1 - t0) print(f"Runtime for {c}, n_images={n_images}: {t1-t0}") results[c] = runtimes plt.figure() for key in results: plt.plot(num_images, results[key], label=key) plt.xlabel("Number images") plt.ylabel("Runtime (seconds)") plt.legend() plt.show() """ # Sanity check all of these should have comparable outputs """ images = [] for aug in [VectorizedRandomCutout, VMapRandomCutout, MapFnRandomCutout]: layer = aug(0.5, 0.5) images.append(layer(x_train[:3])) images = [y for x in images for y in x] plt.figure(figsize=(8, 8)) for i in range(9): plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype("uint8")) plt.axis("off") plt.show() """ # Extra notes ## Warnings it would be really annoying as a user to use an official keras_cv component and get warned that "RandomUniform" or "RandomUniformInt" inside pfor may not get the same output. """
keras-cv/benchmarks/vectorization_strategy_benchmark.py/0
{ "file_path": "keras-cv/benchmarks/vectorization_strategy_benchmark.py", "repo_id": "keras-cv", "token_count": 19626 }
0
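The benchmark above times several vectorization strategies (plain `tf.map_fn`, `tf.vectorized_map`, and their jit-compiled variants) for random cutout. As a minimal, self-contained sketch of the same measurement pattern — warm a callable up once so tracing is not timed, then time a second call — the snippet below compares `tf.map_fn` and `tf.vectorized_map` on a toy per-image op; the op, batch size, and image size are illustrative and not taken from the file.

```python
import time

import tensorflow as tf

images = tf.random.uniform((256, 32, 32, 3))

def per_image_op(image):
    # Toy per-sample transformation standing in for the cutout logic.
    return tf.image.flip_left_right(image) + 0.1

strategies = {
    "map_fn": lambda x: tf.map_fn(per_image_op, x),
    "vectorized_map": lambda x: tf.vectorized_map(per_image_op, x),
}

for name, fn in strategies.items():
    fn(images)  # warmup so graph tracing is excluded from the timing
    start = time.time()
    fn(images)
    print(f"{name}: {time.time() - start:.4f}s")
```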
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from keras import backend from tensorflow import keras from keras_cv.layers import RandomTranslation from keras_cv.layers.preprocessing.base_image_augmentation_layer import ( BaseImageAugmentationLayer, ) from keras_cv.utils import preprocessing as preprocessing_utils H_AXIS = -3 W_AXIS = -2 def check_fill_mode_and_interpolation(fill_mode, interpolation): if fill_mode not in {"reflect", "wrap", "constant", "nearest"}: raise NotImplementedError( f"Unknown `fill_mode` {fill_mode}. Only `reflect`, `wrap`, " "`constant` and `nearest` are supported." ) if interpolation not in {"nearest", "bilinear"}: raise NotImplementedError( f"Unknown `interpolation` {interpolation}. Only `nearest` and " "`bilinear` are supported." ) def get_translation_matrix(translations, name=None): """Returns projective transform(s) for the given translation(s). Args: translations: A matrix of 2-element lists representing `[dx, dy]` to translate for each image (for a batch of images). name: The name of the op. Returns: A tensor of shape `(num_images, 8)` projective transforms which can be given to `transform`. """ with backend.name_scope(name or "translation_matrix"): num_translations = tf.shape(translations)[0] # The translation matrix looks like: # [[1 0 -dx] # [0 1 -dy] # [0 0 1]] # where the last entry is implicit. # Translation matrices are always float32. return tf.concat( values=[ tf.ones((num_translations, 1), tf.float32), tf.zeros((num_translations, 1), tf.float32), -translations[:, 0, None], tf.zeros((num_translations, 1), tf.float32), tf.ones((num_translations, 1), tf.float32), -translations[:, 1, None], tf.zeros((num_translations, 2), tf.float32), ], axis=1, ) class OldRandomTranslation(BaseImageAugmentationLayer): """A preprocessing layer which randomly translates images during training. This layer will apply random translations to each image during training, filling empty space according to `fill_mode`. Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer or floating point dtype. By default, the layer will output floats. Args: height_factor: a float represented as fraction of value, or a tuple of size 2 representing lower and upper bound for shifting vertically. A negative value means shifting image up, while a positive value means shifting image down. When represented as a single positive float, this value is used for both the upper and lower bound. For instance, `height_factor=(-0.2, 0.3)` results in an output shifted by a random amount in the range `[-20%, +30%]`. `height_factor=0.2` results in an output height shifted by a random amount in the range `[-20%, +20%]`. width_factor: a float represented as fraction of value, or a tuple of size 2 representing lower and upper bound for shifting horizontally. A negative value means shifting image left, while a positive value means shifting image right. 
When represented as a single positive float, this value is used for both the upper and lower bound. For instance, `width_factor=(-0.2, 0.3)` results in an output shifted left by 20%, and shifted right by 30%. `width_factor=0.2` results in an output height shifted left or right by 20%. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`). - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by reflecting about the edge of the last pixel. - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by filling all values beyond the edge with the same constant value k = 0. - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by wrapping around to the opposite edge. - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the nearest pixel. interpolation: Interpolation mode. Supported values: `"nearest"`, `"bilinear"`. seed: Integer. Used to create a random seed. fill_value: a float represents the value to be filled outside the boundaries when `fill_mode="constant"`. Input shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format. Output shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format. """ def __init__( self, height_factor, width_factor, fill_mode="reflect", interpolation="bilinear", seed=None, fill_value=0.0, **kwargs, ): super().__init__(seed=seed, **kwargs) self.height_factor = height_factor if isinstance(height_factor, (tuple, list)): self.height_lower = height_factor[0] self.height_upper = height_factor[1] else: self.height_lower = -height_factor self.height_upper = height_factor if self.height_upper < self.height_lower: raise ValueError( "`height_factor` cannot have upper bound less than " f"lower bound, got {height_factor}" ) if abs(self.height_lower) > 1.0 or abs(self.height_upper) > 1.0: raise ValueError( "`height_factor` must have values between [-1, 1], " f"got {height_factor}" ) self.width_factor = width_factor if isinstance(width_factor, (tuple, list)): self.width_lower = width_factor[0] self.width_upper = width_factor[1] else: self.width_lower = -width_factor self.width_upper = width_factor if self.width_upper < self.width_lower: raise ValueError( "`width_factor` cannot have upper bound less than " f"lower bound, got {width_factor}" ) if abs(self.width_lower) > 1.0 or abs(self.width_upper) > 1.0: raise ValueError( "`width_factor` must have values between [-1, 1], " f"got {width_factor}" ) check_fill_mode_and_interpolation(fill_mode, interpolation) self.fill_mode = fill_mode self.fill_value = fill_value self.interpolation = interpolation self.seed = seed def augment_image(self, image, transformation, **kwargs): """Translated inputs with random ops.""" # The transform op only accepts rank 4 inputs, so if we have an # unbatched image, we need to temporarily expand dims to a batch. 
original_shape = image.shape inputs = tf.expand_dims(image, 0) inputs_shape = tf.shape(inputs) img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32) img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32) height_translation = transformation["height_translation"] width_translation = transformation["width_translation"] height_translation = height_translation * img_hd width_translation = width_translation * img_wd translations = tf.cast( tf.concat([width_translation, height_translation], axis=1), dtype=tf.float32, ) output = preprocessing_utils.transform( inputs, get_translation_matrix(translations), interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, ) output = tf.squeeze(output, 0) output.set_shape(original_shape) return output def get_random_transformation(self, image=None, **kwargs): batch_size = 1 height_translation = self._random_generator.uniform( shape=[batch_size, 1], minval=self.height_lower, maxval=self.height_upper, dtype=tf.float32, ) width_translation = self._random_generator.uniform( shape=[batch_size, 1], minval=self.width_lower, maxval=self.width_upper, dtype=tf.float32, ) return { "height_translation": height_translation, "width_translation": width_translation, } def _batch_augment(self, inputs): # Change to vectorized_map for better performance, as well as work # around issue for different tensorspec between inputs and outputs. return tf.vectorized_map(self._augment, inputs) def augment_label(self, label, transformation, **kwargs): return label def compute_output_shape(self, input_shape): return input_shape def get_config(self): config = { "height_factor": self.height_factor, "width_factor": self.width_factor, "fill_mode": self.fill_mode, "fill_value": self.fill_value, "interpolation": self.interpolation, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) class RandomTranslationTest(tf.test.TestCase): def test_consistency_with_old_impl(self): image_shape = (16, 32, 32, 3) fixed_height_factor = (0.5, 0.5) fixed_width_factor = (0.5, 0.5) image = tf.random.uniform(shape=image_shape) * 255.0 layer = RandomTranslation(fixed_height_factor, fixed_width_factor) old_layer = OldRandomTranslation( fixed_height_factor, fixed_width_factor ) output = layer(image) old_output = old_layer(image) self.assertAllClose(old_output, output) if __name__ == "__main__": # Run benchmark (x_train, _), _ = keras.datasets.cifar10.load_data() x_train = x_train.astype(np.float32) num_images = [100, 200, 500, 1000] results = {} aug_candidates = [RandomTranslation, OldRandomTranslation] aug_args = {"height_factor": 0.5, "width_factor": 0.5} for aug in aug_candidates: # Eager Mode c = aug.__name__ layer = aug(**aug_args) runtimes = [] print(f"Timing {c}") for n_images in num_images: # warmup layer(x_train[:n_images]) t0 = time.time() r1 = layer(x_train[:n_images]) t1 = time.time() runtimes.append(t1 - t0) print(f"Runtime for {c}, n_images={n_images}: {t1-t0}") results[c] = runtimes # Graph Mode c = aug.__name__ + " Graph Mode" layer = aug(**aug_args) @tf.function() def apply_aug(inputs): return layer(inputs) runtimes = [] print(f"Timing {c}") for n_images in num_images: # warmup apply_aug(x_train[:n_images]) t0 = time.time() r1 = apply_aug(x_train[:n_images]) t1 = time.time() runtimes.append(t1 - t0) print(f"Runtime for {c}, n_images={n_images}: {t1-t0}") results[c] = runtimes # XLA Mode # cannot run tf.raw_ops.ImageProjectiveTransformV3 on XLA # for more information please refer: # 
https://github.com/tensorflow/tensorflow/issues/55194 plt.figure() for key in results: plt.plot(num_images, results[key], label=key) plt.xlabel("Number images") plt.ylabel("Runtime (seconds)") plt.legend() plt.savefig("comparison.png") # So we can actually see more relevant margins del results[aug_candidates[1].__name__] plt.figure() for key in results: plt.plot(num_images, results[key], label=key) plt.xlabel("Number images") plt.ylabel("Runtime (seconds)") plt.legend() plt.savefig("comparison_no_old_eager.png") # Run unit tests tf.test.main()
keras-cv/benchmarks/vectorized_random_translation.py/0
{ "file_path": "keras-cv/benchmarks/vectorized_random_translation.py", "repo_id": "keras-cv", "token_count": 5646 }
1
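For reference, a hedged usage sketch of the vectorized layer this benchmark exercises, mirroring the consistency test above (batch and image sizes are illustrative):

```python
import tensorflow as tf

from keras_cv.layers import RandomTranslation

images = tf.random.uniform((8, 64, 64, 3)) * 255.0
layer = RandomTranslation(height_factor=0.2, width_factor=0.2)
translated = layer(images)
print(translated.shape)  # (8, 64, 64, 3)
```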
# you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """random_zoom_demo.py shows how to use the RandomZoom preprocessing layer. Operates on images of an elephant. In this script the images are loaded, then passed through the preprocessing layer. Finally, they are shown using matplotlib. """ import demo_utils from keras_cv.layers.preprocessing import RandomZoom def main(): many_elephants = demo_utils.load_elephant_tensor(output_size=(300, 300)) layer = RandomZoom(0.5) augmented = layer(many_elephants) demo_utils.gallery_show(augmented.numpy()) if __name__ == "__main__": main()
keras-cv/examples/layers/preprocessing/classification/random_zoom_demo.py/0
{ "file_path": "keras-cv/examples/layers/preprocessing/classification/random_zoom_demo.py", "repo_id": "keras-cv", "token_count": 312 }
2
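A hedged variation on the demo above: like the standard Keras layer it mirrors, `RandomZoom` should also accept a `(lower, upper)` tuple so zoom-in and zoom-out ranges can differ (the factor values here are illustrative):

```python
from keras_cv.layers.preprocessing import RandomZoom

# Negative values zoom in, positive values zoom out.
layer = RandomZoom(height_factor=(-0.3, 0.5))
```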
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Title: Training a KerasCV model for Imagenet Classification Author: [Ian Stenbit](https://github.com/ianstenbit) Date created: 2022/07/25 Last modified: 2022/07/25 Description: Use KerasCV to train an image classifier using modern best practices """ import math import sys import tensorflow as tf from absl import flags import keras_cv from keras_cv import models from keras_cv.backend import keras from keras_cv.datasets import imagenet """ ## Overview KerasCV makes training state-of-the-art classification models easy by providing implementations of modern models, preprocessing techniques, and layers. In this tutorial, we walk through training a model against the Imagenet dataset using Keras and KerasCV. This tutorial requires you to have KerasCV installed: ```shell pip install keras-cv ``` Note that this depends on TF>=2.11 """ """ ## Setup, constants and flags """ flags.DEFINE_string( "model_name", None, "The name of the model in KerasCV.models to use." ) flags.DEFINE_string( "imagenet_path", None, "Directory from which to load Imagenet." ) flags.DEFINE_string( "weights_path", None, "Directory which will be used to store weight checkpoints.", ) flags.DEFINE_string( "tensorboard_path", None, "Directory which will be used to store tensorboard logs.", ) flags.DEFINE_integer( "batch_size", 128, "Batch size for training and evaluation. This will be multiplied by the " "number of accelerators in use.", ) flags.DEFINE_boolean( "use_xla", True, "whether to use XLA (jit_compile) for training." ) flags.DEFINE_boolean( "use_mixed_precision", False, "whether to use FP16 mixed precision for training.", ) flags.DEFINE_boolean( "use_ema", True, "whether to use exponential moving average weight updating", ) flags.DEFINE_float( "initial_learning_rate", 0.05, "Initial learning rate which will reduce on plateau. This will be " "multiplied by the number of accelerators in use", ) flags.DEFINE_string( "model_kwargs", "{}", "Keyword argument dictionary to pass to the constructor of the model being " "trained", ) flags.DEFINE_string( "learning_rate_schedule", "ReduceOnPlateau", "String denoting the type of learning rate schedule to be used", ) flags.DEFINE_float( "warmup_steps_percentage", 0.1, "For how many steps expressed in percentage (0..1 float) of total steps " "should the schedule warm up if we're using the warmup schedule", ) flags.DEFINE_float( "warmup_hold_steps_percentage", 0.45, "For how many steps expressed in percentage (0..1 float) of total steps " "should the schedule hold the initial learning rate after warmup is " "finished, and before applying cosine decay.", ) flags.DEFINE_float( "weight_decay", 5e-4, "Weight decay parameter for the optimizer", ) # An upper bound for number of epochs (this script uses EarlyStopping). 
flags.DEFINE_integer("epochs", 1000, "Epochs to train for") FLAGS = flags.FLAGS FLAGS(sys.argv) NUM_CLASSES = 1000 IMAGE_SIZE = (224, 224) REDUCE_ON_PLATEAU = "ReduceOnPlateau" COSINE_DECAY_WITH_WARMUP = "CosineDecayWithWarmup" if FLAGS.model_name not in models.__dict__: raise ValueError(f"Invalid model name: {FLAGS.model_name}") if FLAGS.use_mixed_precision: keras.mixed_precision.set_global_policy("mixed_float16") """ We start by detecting the type of accelerators we have available and picking an appropriate distribution strategy accordingly. We scale our learning rate and batch size based on the number of accelerators being used. """ # Try to detect an available TPU. If none is present, defaults to # MirroredStrategy try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() strategy = tf.distribute.TPUStrategy(tpu) if FLAGS.use_mixed_precision: keras.mixed_precision.set_global_policy("mixed_bfloat16") except ValueError: # MirroredStrategy is best for a single machine with one or multiple GPUs strategy = tf.distribute.MirroredStrategy() print("Number of accelerators: ", strategy.num_replicas_in_sync) BATCH_SIZE = FLAGS.batch_size * strategy.num_replicas_in_sync INITIAL_LEARNING_RATE = ( FLAGS.initial_learning_rate * strategy.num_replicas_in_sync ) """TFRecord-based tf.data.Dataset loads lazily so we can't get the length of the dataset. Temporary.""" NUM_IMAGES = 1281167 """ ## Data loading This guide uses the [Imagenet dataset](https://www.tensorflow.org/datasets/catalog/imagenet2012). Note that this requires manual download and preprocessing. You can find more information about preparing this dataset at keras_cv/datasets/imagenet/README.md """ train_ds = imagenet.load( split="train", tfrecord_path=FLAGS.imagenet_path, shuffle_buffer=BATCH_SIZE * 8, reshuffle_each_iteration=True, ) test_ds = imagenet.load( split="validation", tfrecord_path=FLAGS.imagenet_path, batch_size=BATCH_SIZE, img_size=IMAGE_SIZE, ) """ Next, we augment our dataset. We define a set of augmentation layers and then apply them to our input dataset. """ random_crop_and_resize = keras_cv.layers.RandomCropAndResize( target_size=IMAGE_SIZE, crop_area_factor=(0.8, 1), aspect_ratio_factor=(3 / 4, 4 / 3), ) @tf.function def crop_and_resize(img, label): inputs = {"images": img, "labels": label} inputs = random_crop_and_resize(inputs) return inputs["images"], inputs["labels"] AUGMENT_LAYERS = [ keras_cv.layers.RandomFlip(mode="horizontal"), keras_cv.layers.RandAugment(value_range=(0, 255), magnitude=0.3), ] @tf.function def augment(img, label): inputs = {"images": img, "labels": label} for layer in AUGMENT_LAYERS: inputs = layer(inputs) if tf.random.uniform(()) > 0.5: inputs = keras_cv.layers.CutMix()(inputs) else: inputs = keras_cv.layers.MixUp()(inputs) return inputs["images"], inputs["labels"] train_ds = ( train_ds.map(crop_and_resize, num_parallel_calls=tf.data.AUTOTUNE) .batch(BATCH_SIZE) .map(augment, num_parallel_calls=tf.data.AUTOTUNE) .prefetch(tf.data.AUTOTUNE) ) test_ds = test_ds.prefetch(tf.data.AUTOTUNE) """ Now we can begin training our model. We begin by loading a model from KerasCV. """ with strategy.scope(): backbone = models.__dict__[FLAGS.model_name] model = models.ImageClassifier( backbone=backbone(input_shape=IMAGE_SIZE + (3,)), num_classes=NUM_CLASSES, activation="softmax", **eval(FLAGS.model_kwargs), ) """ Optional LR schedule with cosine decay instead of ReduceLROnPlateau TODO: Replace with Core Keras LRWarmup when it's released. This is a temporary solution. 
Convenience method for calculating LR at given timestep, for the WarmUpCosineDecay class. """ def lr_warmup_cosine_decay( global_step, warmup_steps, hold=0, total_steps=0, start_lr=0.0, target_lr=1e-2, ): # Cosine decay learning_rate = ( 0.5 * target_lr * ( 1 + tf.cos( tf.constant(math.pi) * tf.cast(global_step - warmup_steps - hold, tf.float32) / float(total_steps - warmup_steps - hold) ) ) ) warmup_lr = tf.cast(target_lr * (global_step / warmup_steps), tf.float32) target_lr = tf.cast(target_lr, tf.float32) if hold > 0: learning_rate = tf.where( global_step > warmup_steps + hold, learning_rate, target_lr ) learning_rate = tf.where( global_step < warmup_steps, warmup_lr, learning_rate ) return learning_rate """ LearningRateSchedule implementing the learning rate warmup with cosine decay strategy. Learning rate warmup should help with initial training instability, while the decay strategy may be variable, cosine being a popular choice. The schedule will start from 0.0 (or supplied start_lr) and gradually "warm up" linearly to the target_lr. From there, it will apply a cosine decay to the learning rate, after an optional holding period. args: - [float] start_lr: default 0.0, the starting learning rate at the beginning of training from which the warmup starts - [float] target_lr: default 1e-2, the target (initial) learning rate from which you'd usually start without a LR warmup schedule - [int] warmup_steps: number of training steps to warm up for expressed in batches - [int] total_steps: the total steps (epochs * number of batches per epoch) in the dataset - [int] hold: optional argument to hold the target_lr before applying cosine decay on it """ class WarmUpCosineDecay(keras.optimizers.schedules.LearningRateSchedule): def __init__( self, warmup_steps, total_steps, hold, start_lr=0.0, target_lr=1e-2 ): super().__init__() self.start_lr = start_lr self.target_lr = target_lr self.warmup_steps = warmup_steps self.total_steps = total_steps self.hold = hold def __call__(self, step): lr = lr_warmup_cosine_decay( global_step=step, total_steps=self.total_steps, warmup_steps=self.warmup_steps, start_lr=self.start_lr, target_lr=self.target_lr, hold=self.hold, ) return tf.where(step > self.total_steps, 0.0, lr, name="learning_rate") total_steps = (NUM_IMAGES // BATCH_SIZE) * FLAGS.epochs warmup_steps = int(FLAGS.warmup_steps_percentage * total_steps) hold_steps = int(FLAGS.warmup_hold_steps_percentage * total_steps) schedule = WarmUpCosineDecay( start_lr=0.0, target_lr=INITIAL_LEARNING_RATE, warmup_steps=warmup_steps, total_steps=total_steps, hold=hold_steps, ) """ Next, we pick an optimizer. Here we use SGD. Note that learning rate will decrease over time due to the ReduceLROnPlateau callback or with the LRWarmup scheduler. """ with strategy.scope(): if FLAGS.learning_rate_schedule == COSINE_DECAY_WITH_WARMUP: optimizer = keras.optimizers.SGD( weight_decay=FLAGS.weight_decay, learning_rate=schedule, momentum=0.9, use_ema=FLAGS.use_ema, ) else: optimizer = keras.optimizers.SGD( weight_decay=FLAGS.weight_decay, learning_rate=INITIAL_LEARNING_RATE, momentum=0.9, global_clipnorm=10, use_ema=FLAGS.use_ema, ) """ Next, we pick a loss function. We use CategoricalCrossentropy with label smoothing. """ loss_fn = keras.losses.CategoricalCrossentropy(label_smoothing=0.1) """ Next, we specify the metrics that we want to track. For this example, we track accuracy. 
""" with strategy.scope(): training_metrics = [ keras.metrics.CategoricalAccuracy(), keras.metrics.TopKCategoricalAccuracy(k=5), ] """ As a last piece of configuration, we configure callbacks for the method. We use EarlyStopping, BackupAndRestore, and a model checkpointing callback. """ model_callbacks = [ keras.callbacks.EarlyStopping(patience=20), keras.callbacks.ModelCheckpoint( FLAGS.weights_path, save_weights_only=True, save_best_only=True ), keras.callbacks.TensorBoard( log_dir=FLAGS.tensorboard_path, write_steps_per_second=True ), ] if FLAGS.learning_rate_schedule == REDUCE_ON_PLATEAU: model_callbacks.append( keras.callbacks.ReduceLROnPlateau( monitor="val_loss", factor=0.1, patience=10, min_delta=0.001, min_lr=0.0001, ) ) """ We can now compile the model and fit it to the training dataset. """ model.compile( optimizer=optimizer, loss=loss_fn, metrics=training_metrics, jit_compile=FLAGS.use_xla, ) model.fit( train_ds, batch_size=BATCH_SIZE, epochs=FLAGS.epochs, callbacks=model_callbacks, validation_data=test_ds, )
keras-cv/examples/training/classification/imagenet/basic_training.py/0
{ "file_path": "keras-cv/examples/training/classification/imagenet/basic_training.py", "repo_id": "keras-cv", "token_count": 4844 }
3
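The learning-rate schedule in the script above can be sanity-checked in isolation. The sketch below re-implements the warmup → hold → cosine-decay curve of `lr_warmup_cosine_decay` with plain Python scalars; the hyperparameter values are illustrative, not the script's defaults.

```python
import math

def lr_at(step, warmup_steps=100, hold=50, total_steps=1000, target_lr=0.05):
    # Linear warmup from 0 to target_lr.
    if step < warmup_steps:
        return target_lr * step / warmup_steps
    # Hold target_lr for `hold` steps after warmup.
    if step < warmup_steps + hold:
        return target_lr
    # Cosine decay from target_lr down to 0 over the remaining steps.
    progress = (step - warmup_steps - hold) / (total_steps - warmup_steps - hold)
    return 0.5 * target_lr * (1 + math.cos(math.pi * progress))

for step in (0, 50, 100, 120, 500, 1000):
    print(step, round(lr_at(step), 5))
```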
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import types from keras_cv.backend import config _KERAS_CORE_ALIASES = { "utils->saving": [ "register_keras_serializable", "deserialize_keras_object", "serialize_keras_object", "get_registered_object", ], "models->saving": ["load_model"], } if config.keras_3(): import keras # noqa: F403, F401 from keras import * # noqa: F403, F401 keras.backend.name_scope = keras.name_scope else: from tensorflow import keras # noqa: F403, F401 from tensorflow.keras import * # noqa: F403, F401 if not hasattr(keras, "saving"): keras.saving = types.SimpleNamespace() # add aliases for key, value in _KERAS_CORE_ALIASES.items(): src, _, dst = key.partition("->") src = src.split(".") dst = dst.split(".") src_mod, dst_mod = keras, keras # navigate to where we want to alias the attributes for mod in src: src_mod = getattr(src_mod, mod) for mod in dst: dst_mod = getattr(dst_mod, mod) # add an alias for each attribute for attr in value: if isinstance(attr, tuple): src_attr, dst_attr = attr else: src_attr, dst_attr = attr, attr attr_val = getattr(src_mod, src_attr) setattr(dst_mod, dst_attr, attr_val) # TF Keras doesn't have this rename. keras.activations.silu = keras.activations.swish
keras-cv/keras_cv/backend/keras.py/0
{ "file_path": "keras-cv/keras_cv/backend/keras.py", "repo_id": "keras-cv", "token_count": 834 }
4
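A hedged sketch of how downstream code consumes this shim: the same import resolves to Keras 3 or `tf.keras` depending on the environment, and the aliased `keras.saving` symbols are available either way (the `Dense` layer is just an example object to serialize):

```python
from keras_cv.backend import config
from keras_cv.backend import keras

print("Running on Keras 3:", config.keras_3())

# `keras` resolves to the standalone Keras 3 package or to `tensorflow.keras`,
# so calling code can stay backend-agnostic.
dense = keras.layers.Dense(4)
serialized = keras.saving.serialize_keras_object(dense)
restored = keras.saving.deserialize_keras_object(serialized)
```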
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf import keras_cv.bounding_box.validate_format as validate_format from keras_cv.api_export import keras_cv_export from keras_cv.backend.scope import tf_data def _box_shape(batched, boxes_shape, max_boxes): # ensure we dont drop the final axis in RaggedTensor mode if max_boxes is None: shape = list(boxes_shape) shape[-1] = 4 return shape if batched: return [None, max_boxes, 4] return [max_boxes, 4] def _classes_shape(batched, classes_shape, max_boxes): if max_boxes is None: return None if batched: return [None, max_boxes] + classes_shape[2:] return [max_boxes] + classes_shape[2:] @keras_cv_export("keras_cv.bounding_box.to_dense") @tf_data def to_dense(bounding_boxes, max_boxes=None, default_value=-1): """to_dense converts bounding boxes to Dense tensors Args: bounding_boxes: bounding boxes in KerasCV dictionary format. max_boxes: the maximum number of boxes, used to pad tensors to a given shape. This can be used to make object detection pipelines TPU compatible. default_value: the default value to pad bounding boxes with. defaults to -1. """ info = validate_format.validate_format(bounding_boxes) # guards against errors in metrics regarding modification of inputs. # also guards against unexpected behavior when modifying downstream bounding_boxes = bounding_boxes.copy() # Already running in masked mode if not info["ragged"]: # even if already ragged, still copy the dictionary for API consistency return bounding_boxes if isinstance(bounding_boxes["classes"], tf.RaggedTensor): bounding_boxes["classes"] = bounding_boxes["classes"].to_tensor( default_value=default_value, shape=_classes_shape( info["is_batched"], bounding_boxes["classes"].shape, max_boxes ), ) if isinstance(bounding_boxes["boxes"], tf.RaggedTensor): bounding_boxes["boxes"] = bounding_boxes["boxes"].to_tensor( default_value=default_value, shape=_box_shape( info["is_batched"], bounding_boxes["boxes"].shape, max_boxes ), ) if "confidence" in bounding_boxes: if isinstance(bounding_boxes["confidence"], tf.RaggedTensor): bounding_boxes["confidence"] = bounding_boxes[ "confidence" ].to_tensor( default_value=default_value, shape=_classes_shape( info["is_batched"], bounding_boxes["confidence"].shape, max_boxes, ), ) return bounding_boxes
keras-cv/keras_cv/bounding_box/to_dense.py/0
{ "file_path": "keras-cv/keras_cv/bounding_box/to_dense.py", "repo_id": "keras-cv", "token_count": 1323 }
5
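A short, hedged usage sketch of `to_dense` on a ragged, batched bounding-box dictionary; the box values are arbitrary, and the padding value and output shapes follow the docstring above:

```python
import tensorflow as tf

from keras_cv import bounding_box

bounding_boxes = {
    "boxes": tf.ragged.constant(
        [[[0, 0, 10, 10], [5, 5, 12, 12]], [[2, 2, 8, 8]]],
        ragged_rank=1,
        dtype=tf.float32,
    ),
    "classes": tf.ragged.constant([[0, 1], [2]], dtype=tf.float32),
}

dense = bounding_box.to_dense(bounding_boxes, max_boxes=4)
print(dense["boxes"].shape)    # (2, 4, 4), short rows padded with -1
print(dense["classes"].shape)  # (2, 4), short rows padded with -1
```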
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Common data structures for Waymo Open Dataset inputs.""" import dataclasses from typing import Optional import tensorflow as tf @dataclasses.dataclass class PointTensors: """Wraps point related tensors.""" # [N, 3] point x, y, z global cartesian coordinates. point_xyz: tf.Tensor # [N, 4] point feature: intensity, elongation, has_second, is_second. point_feature: tf.Tensor # [N, 3] range image row, column indices and sensor id. point_range_image_row_col_sensor_id: tf.Tensor # [N] NLZ (no label zone) mask. Set to true if the point is in NLZ. label_point_nlz: tf.Tensor @dataclasses.dataclass class LabelTensors: """Wraps label related tensors.""" # [M, 7] 3d boxes in [center_{x,y,z}, length, width, height, heading]. label_box: Optional[tf.Tensor] = None # [M] box id. label_box_id: Optional[tf.Tensor] = None # [M, 4] box speed_{x,y} and accel_{x,y}. label_box_meta: Optional[tf.Tensor] = None # [M] box class. label_box_class: Optional[tf.Tensor] = None # [M] number of points in each box. label_box_density: Optional[tf.Tensor] = None # [M] detection difficulty level. label_box_detection_difficulty: Optional[tf.Tensor] = None # [M] valid box mask. label_box_mask: Optional[tf.Tensor] = None # [M] object class of each point. label_point_class: Optional[tf.Tensor] = None
keras-cv/keras_cv/datasets/waymo/struct.py/0
{ "file_path": "keras-cv/keras_cv/datasets/waymo/struct.py", "repo_id": "keras-cv", "token_count": 688 }
6
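Since these are plain dataclass containers, constructing one is straightforward. Below is an illustrative sketch with dummy tensors whose shapes follow the field comments; the boolean dtype for `label_point_nlz` is an assumption, and the import assumes the optional Waymo Open Dataset dependencies are installed.

```python
import tensorflow as tf

from keras_cv.datasets.waymo.struct import PointTensors

num_points = 100
points = PointTensors(
    point_xyz=tf.zeros([num_points, 3]),
    point_feature=tf.zeros([num_points, 4]),
    point_range_image_row_col_sensor_id=tf.zeros([num_points, 3]),
    label_point_nlz=tf.zeros([num_points], dtype=tf.bool),
)
print(points.point_xyz.shape)  # (100, 3)
```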
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from tensorflow import keras from keras_cv import bounding_box from keras_cv.api_export import keras_cv_export from keras_cv.backend import assert_tf_keras @keras_cv_export("keras_cv.layers.ROIPooler") class ROIPooler(keras.layers.Layer): """ Pooling feature map of dynamic shape into region of interest (ROI) of fixed shape. Mainly used in Region CNN (RCNN) networks. This works for a single-level input feature map. This layer splits the feature map into [target_size[0], target_size[1]] areas, and performs max pooling for each area. The area coordinates will be quantized. Args: bounding_box_format: a case-insensitive string. For detailed information on the supported format, see the [KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/). target_size: List or Tuple of 2 integers of the pooled shape image_shape: List of Tuple of 3 integers, or `TensorShape` of the input image shape. Usage: ```python feature_map = tf.random.normal([2, 16, 16, 512]) roi_pooler = ROIPooler(bounding_box_format="yxyx", target_size=[7, 7], image_shape=[224, 224, 3]) rois = tf.constant([[[15., 30., 25., 45.]], [[22., 1., 30., 32.]]]) pooled_feature_map = roi_pooler(feature_map, rois) ``` """ # noqa: E501 def __init__( self, bounding_box_format, # TODO(consolidate size vs shape for KPL and here) target_size, image_shape, **kwargs, ): assert_tf_keras("keras_cv.layers.ROIPooler") if not isinstance(target_size, (tuple, list)): raise ValueError( "Expected `target_size` to be tuple or list, got " f"{type(target_size)}" ) if len(target_size) != 2: raise ValueError( f"Expected `target_size` to be size 2, got {len(target_size)}" ) if ( image_shape[0] is None or image_shape[1] is None or image_shape[2] is None ): raise ValueError( f"`image_shape` cannot have dynamic shape, got {image_shape}" ) super().__init__(**kwargs) self.bounding_box_format = bounding_box_format self.target_height = target_size[0] self.target_width = target_size[1] self.image_shape = image_shape self.built = True def call(self, feature_map, rois): """ Args: feature_map: [batch_size, H, W, C] float Tensor, the feature map extracted from image. rois: [batch_size, N, 4] float Tensor, the region of interests to be pooled. 
Returns: pooled_feature_map: [batch_size, N, target_size, C] float Tensor """ # convert to relative format given feature map shape != image shape rois = bounding_box.convert_format( rois, source=self.bounding_box_format, target="rel_yxyx", image_shape=self.image_shape, ) pooled_feature_map = tf.vectorized_map( self._pool_single_sample, (feature_map, rois) ) return pooled_feature_map def _pool_single_sample(self, args): """ Args: tuple of feature_map: [H, W, C] float Tensor rois: [N, 4] float Tensor Returns: pooled_feature_map: [target_size, C] float Tensor """ feature_map, rois = args num_rois = rois.get_shape().as_list()[0] height, width, channel = feature_map.get_shape().as_list() # TODO (consider vectorize it for better performance) for n in range(num_rois): # [4] roi = rois[n, :] y_start = height * roi[0] x_start = width * roi[1] region_height = height * (roi[2] - roi[0]) region_width = width * (roi[3] - roi[1]) h_step = region_height / self.target_height w_step = region_width / self.target_width regions = [] for i in range(self.target_height): for j in range(self.target_width): height_start = y_start + i * h_step height_end = height_start + h_step height_start = tf.cast(height_start, tf.int32) height_end = tf.cast(height_end, tf.int32) # if feature_map shape smaller than roi, h_step would be 0 # in this case the result will be feature_map[0, 0, ...] height_end = height_start + tf.maximum( 1, height_end - height_start ) width_start = x_start + j * w_step width_end = width_start + w_step width_start = tf.cast(width_start, tf.int32) width_end = tf.cast(width_end, tf.int32) width_end = width_start + tf.maximum( 1, width_end - width_start ) # [h_step, w_step, C] region = feature_map[ height_start:height_end, width_start:width_end, : ] # target_height * target_width * [C] regions.append(tf.reduce_max(region, axis=[0, 1])) regions = tf.reshape( tf.stack(regions), [self.target_height, self.target_width, channel], ) return regions def get_config(self): config = { "bounding_box_format": self.bounding_box_format, "target_size": [self.target_height, self.target_width], "image_shape": self.image_shape, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
keras-cv/keras_cv/layers/object_detection/roi_pool.py/0
{ "file_path": "keras-cv/keras_cv/layers/object_detection/roi_pool.py", "repo_id": "keras-cv", "token_count": 3115 }
7
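Expanding slightly on the docstring example, a hedged end-to-end sketch; the ROI coordinates are arbitrary, and a TF-Keras (Keras 2) environment is assumed since the layer calls `assert_tf_keras` at construction time:

```python
import tensorflow as tf

from keras_cv.layers.object_detection.roi_pool import ROIPooler

feature_map = tf.random.normal([2, 16, 16, 512])
rois = tf.constant([[[15.0, 30.0, 25.0, 45.0]], [[22.0, 1.0, 30.0, 32.0]]])

pooler = ROIPooler(
    bounding_box_format="yxyx", target_size=[7, 7], image_shape=[224, 224, 3]
)
pooled = pooler(feature_map, rois)
print(pooled.shape)  # 7x7 pooled regions with 512 channels per ROI
```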
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops @keras_cv_export("keras_cv.layers.OverlappingPatchingAndEmbedding") class OverlappingPatchingAndEmbedding(keras.layers.Layer): def __init__(self, project_dim=32, patch_size=7, stride=4, **kwargs): """ Overlapping Patching and Embedding layer. Differs from `PatchingAndEmbedding` in that the patch size does not affect the sequence length. It's fully derived from the `stride` parameter. Additionally, no positional embedding is done as part of the layer - only a projection using a `Conv2D` layer. References: - [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) (CVPR 2021) # noqa: E501 - [Official PyTorch implementation](https://github.com/NVlabs/SegFormer/blob/master/mmseg/models/backbones/mix_transformer.py) # noqa: E501 - [Ported from the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/blob/main/deepvision/layers/hierarchical_transformer_encoder.py) # noqa: E501 Args: project_dim: integer, the dimensionality of the projection. Defaults to `32`. patch_size: integer, the size of the patches to encode. Defaults to `7`. stride: integer, the stride to use for the patching before projection. Defaults to `4`. Basic usage: ``` project_dim = 1024 patch_size = 16 encoded_patches = keras_cv.layers.OverlappingPatchingAndEmbedding( project_dim=project_dim, patch_size=patch_size)(img_batch) print(encoded_patches.shape) # (1, 3136, 1024) ``` """ super().__init__(**kwargs) self.project_dim = project_dim self.patch_size = patch_size self.stride = stride self.proj = keras.layers.Conv2D( filters=project_dim, kernel_size=patch_size, strides=stride, padding="same", ) self.norm = keras.layers.LayerNormalization() def call(self, x): x = self.proj(x) # B, H, W, C shape = x.shape x = ops.reshape(x, (-1, shape[1] * shape[2], shape[3])) x = self.norm(x) return x def get_config(self): config = super().get_config() config.update( { "project_dim": self.project_dim, "patch_size": self.patch_size, "stride": self.stride, } ) return config
keras-cv/keras_cv/layers/overlapping_patching_embedding.py/0
{ "file_path": "keras-cv/keras_cv/layers/overlapping_patching_embedding.py", "repo_id": "keras-cv", "token_count": 1363 }
8
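A small sketch of the shape relation described in the docstring: because the projection convolution uses `padding="same"`, the sequence length depends only on the stride (roughly `ceil(H / stride) * ceil(W / stride)`), not on `patch_size`. The input size below is illustrative.

```python
import tensorflow as tf

from keras_cv.layers import OverlappingPatchingAndEmbedding

images = tf.random.uniform((1, 224, 224, 3))
layer = OverlappingPatchingAndEmbedding(project_dim=32, patch_size=7, stride=4)
tokens = layer(images)
print(tokens.shape)  # (1, 3136, 32), since 224 / 4 = 56 and 56 * 56 = 3136
```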
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import tensorflow as tf from keras_cv.layers.preprocessing.fourier_mix import FourierMix from keras_cv.tests.test_case import TestCase num_classes = 10 class FourierMixTest(TestCase): def test_return_shapes(self): xs = tf.ones((2, 512, 512, 3)) # randomly sample labels ys = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 2) ys = tf.squeeze(ys) ys = tf.one_hot(ys, num_classes) # randomly sample segmentation mask ys_segmentation_masks = tf.cast( tf.stack( [2 * tf.ones((512, 512)), tf.ones((512, 512))], axis=0, ), tf.uint8, ) ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3) layer = FourierMix() outputs = layer( { "images": xs, "labels": ys, "segmentation_masks": ys_segmentation_masks, } ) xs, ys, ys_segmentation_masks = ( outputs["images"], outputs["labels"], outputs["segmentation_masks"], ) self.assertEqual(xs.shape, (2, 512, 512, 3)) self.assertEqual(ys.shape, (2, 10)) self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3)) def test_fourier_mix_call_results_with_labels(self): xs = tf.cast( tf.stack( [2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))], axis=0, ), tf.float32, ) ys = tf.one_hot(tf.constant([0, 1]), 2) layer = FourierMix() outputs = layer({"images": xs, "labels": ys}) xs, ys = outputs["images"], outputs["labels"] # None of the individual values should still be close to 1 or 0 self.assertNotAllClose(xs, 1.0) self.assertNotAllClose(xs, 2.0) # No labels should still be close to their originals self.assertNotAllClose(ys, 1.0) self.assertNotAllClose(ys, 0.0) def test_mix_up_call_results_with_masks(self): xs = tf.cast( tf.stack( [2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))], axis=0, ), tf.float32, ) ys_segmentation_masks = tf.cast( tf.stack( [2 * tf.ones((4, 4)), tf.ones((4, 4))], axis=0, ), tf.uint8, ) ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3) layer = FourierMix() outputs = layer( {"images": xs, "segmentation_masks": ys_segmentation_masks} ) xs, ys_segmentation_masks = ( outputs["images"], outputs["segmentation_masks"], ) # None of the individual values should still be close to 1 or 0 self.assertNotAllClose(xs, 1.0) self.assertNotAllClose(xs, 2.0) # No masks should still be close to their originals self.assertNotAllClose(ys_segmentation_masks, 1.0) self.assertNotAllClose(ys_segmentation_masks, 0.0) @pytest.mark.tf_only def test_in_tf_function(self): xs = tf.cast( tf.stack( [2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))], axis=0, ), tf.float32, ) ys = tf.one_hot(tf.constant([0, 1]), 2) layer = FourierMix() @tf.function def augment(x, y): return layer({"images": x, "labels": y}) outputs = augment(xs, ys) xs, ys = outputs["images"], outputs["labels"] # None of the individual values should still be close to 1 or 0 self.assertNotAllClose(xs, 1.0) self.assertNotAllClose(xs, 2.0) # No labels should still be close to their originals self.assertNotAllClose(ys, 1.0) self.assertNotAllClose(ys, 0.0) def test_image_input_only(self): xs = tf.cast( tf.stack( [2 * tf.ones((100, 100, 1)), 
tf.ones((100, 100, 1))], axis=0 ), tf.float32, ) layer = FourierMix() with self.assertRaisesRegex( ValueError, "expects inputs in a dictionary" ): _ = layer(xs) def test_single_image_input(self): xs = tf.ones((512, 512, 3)) ys = tf.one_hot(tf.constant([1]), 2) inputs = {"images": xs, "labels": ys} layer = FourierMix() with self.assertRaisesRegex( ValueError, "FourierMix received a single image to `call`" ): _ = layer(inputs)
keras-cv/keras_cv/layers/preprocessing/fourier_mix_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/fourier_mix_test.py", "repo_id": "keras-cv", "token_count": 2606 }
9
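For reference outside the test harness, a minimal hedged usage sketch of the layer under test; as the tests above show, `FourierMix` expects a dictionary with batched images and one-hot labels (sizes are illustrative):

```python
import tensorflow as tf

from keras_cv.layers.preprocessing.fourier_mix import FourierMix

images = tf.random.uniform((2, 64, 64, 3))
labels = tf.one_hot(tf.constant([0, 1]), 2)

mixed = FourierMix()({"images": images, "labels": labels})
print(mixed["images"].shape, mixed["labels"].shape)  # (2, 64, 64, 3) (2, 2)
```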
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.api_export import keras_cv_export from keras_cv.layers.preprocessing.base_image_augmentation_layer import ( BaseImageAugmentationLayer, ) @keras_cv_export("keras_cv.layers.RandomApply") class RandomApply(BaseImageAugmentationLayer): """Apply provided layer to random elements in a batch. Args: layer: a keras `Layer` or `BaseImageAugmentationLayer`. This layer will be applied to randomly chosen samples in a batch. Layer should not modify the size of provided inputs. rate: controls the frequency of applying the layer. 1.0 means all elements in a batch will be modified. 0.0 means no elements will be modified. Defaults to 0.5. batchwise: (Optional) bool, whether to pass entire batches to the underlying layer. When set to true, only a single random sample is drawn to determine if the batch should be passed to the underlying layer. This is useful when using `MixUp()`, `CutMix()`, `Mosaic()`, etc. auto_vectorize: bool, whether to use tf.vectorized_map or tf.map_fn for batched input. Setting this to True might give better performance but currently doesn't work with XLA. Defaults to False. seed: integer, controls random behaviour. Example usage: ``` # Let's declare an example layer that will set all image pixels to zero. zero_out = keras.layers.Lambda(lambda x: {"images": 0 * x["images"]}) # Create a small batch of random, single-channel, 2x2 images: images = tf.random.stateless_uniform(shape=(5, 2, 2, 1), seed=[0, 1]) print(images[..., 0]) # <tf.Tensor: shape=(5, 2, 2), dtype=float32, numpy= # array([[[0.08216608, 0.40928006], # [0.39318466, 0.3162533 ]], # # [[0.34717774, 0.73199546], # [0.56369007, 0.9769211 ]], # # [[0.55243933, 0.13101244], # [0.2941643 , 0.5130266 ]], # # [[0.38977218, 0.80855536], # [0.6040567 , 0.10502195]], # # [[0.51828027, 0.12730157], # [0.288486 , 0.252975 ]]], dtype=float32)> # Apply the layer with 50% probability: random_apply = RandomApply(layer=zero_out, rate=0.5, seed=1234) outputs = random_apply(images) print(outputs[..., 0]) # <tf.Tensor: shape=(5, 2, 2), dtype=float32, numpy= # array([[[0. , 0. ], # [0. , 0. ]], # # [[0.34717774, 0.73199546], # [0.56369007, 0.9769211 ]], # # [[0.55243933, 0.13101244], # [0.2941643 , 0.5130266 ]], # # [[0.38977218, 0.80855536], # [0.6040567 , 0.10502195]], # # [[0. , 0. ], # [0. , 0. ]]], dtype=float32)> # We can observe that the layer has been randomly applied to 2 out of 5 samples. ``` """ def __init__( self, layer, rate=0.5, batchwise=False, auto_vectorize=False, seed=None, **kwargs, ): super().__init__(seed=seed, **kwargs) if not (0 <= rate <= 1.0): raise ValueError( f"rate must be in range [0, 1]. 
Received rate: {rate}" ) self._layer = layer self._rate = rate self.auto_vectorize = auto_vectorize self.batchwise = batchwise self.seed = seed self.built = True def _should_augment(self): return self._random_generator.uniform(shape=()) > 1.0 - self._rate def _batch_augment(self, inputs): if self.batchwise: # batchwise augmentations if self._should_augment(): return self._layer(inputs) else: return inputs # non-batchwise augmentations return super()._batch_augment(inputs) def _augment(self, inputs): if self._should_augment(): return self._layer(inputs) else: return inputs def get_config(self): config = super().get_config() config.update( { "rate": self._rate, "layer": self._layer, "seed": self.seed, "batchwise": self.batchwise, "auto_vectorize": self.auto_vectorize, } ) return config
keras-cv/keras_cv/layers/preprocessing/random_apply.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_apply.py", "repo_id": "keras-cv", "token_count": 2283 }
10
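A hedged sketch of the `batchwise=True` case mentioned in the docstring: a single random draw decides whether the whole batch is passed to the wrapped layer, which is the suggested pattern for batch-level augmentations such as `CutMix` (sizes and rate are illustrative):

```python
import tensorflow as tf

import keras_cv

images = tf.random.uniform((8, 64, 64, 3)) * 255.0
labels = tf.one_hot(tf.random.uniform((8,), maxval=10, dtype=tf.int32), 10)

random_cutmix = keras_cv.layers.RandomApply(
    layer=keras_cv.layers.CutMix(), rate=0.5, batchwise=True
)
augmented = random_cutmix({"images": images, "labels": labels})
```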
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.api_export import keras_cv_export from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501 VectorizedBaseImageAugmentationLayer, ) from keras_cv.utils import preprocessing as preprocessing_utils @keras_cv_export("keras_cv.layers.RandomContrast") class RandomContrast(VectorizedBaseImageAugmentationLayer): """RandomContrast randomly adjusts contrast. This layer will randomly adjust the contrast of an image or images by a random factor. Contrast is adjusted independently for each channel of each image. For each channel, this layer computes the mean of the image pixels in the channel and then adjusts each component `x` of each pixel to `(x - mean) * contrast_factor + mean`. Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and in integer or floating point dtype. By default, the layer will output floats. The output value will be clipped to the range `[0, 255]`, the valid range of RGB colors. Input shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format. Output shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format. Args: value_range: A tuple or a list of two elements. The first value represents the lower bound for values in passed images, the second represents the upper bound. Images passed to the layer should have values within `value_range`. factor: A positive float represented as fraction of value, or a tuple of size 2 representing lower and upper bound. When represented as a single float, lower = upper. The contrast factor will be randomly picked between `[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel, the output will be `(x - mean) * factor + mean` where `mean` is the mean value of the channel. seed: Integer. Used to create a random seed. 
Usage: ```python (images, labels), _ = keras.datasets.cifar10.load_data() random_contrast = keras_cv.layers.preprocessing.RandomContrast() augmented_images = random_contrast(images) ``` """ def __init__(self, value_range, factor, seed=None, **kwargs): super().__init__(seed=seed, **kwargs) if isinstance(factor, (tuple, list)): min = 1 - factor[0] max = 1 + factor[1] else: min = 1 - factor max = 1 + factor self.factor_input = factor self.factor = preprocessing_utils.parse_factor( (min, max), min_value=-1, max_value=2 ) self.value_range = value_range self.seed = seed def get_random_transformation_batch(self, batch_size, **kwargs): return self.factor(shape=(batch_size, 1, 1, 1)) def augment_ragged_image(self, image, transformation, **kwargs): return self.augment_images( images=image, transformations=transformation, **kwargs ) def augment_images(self, images, transformations, **kwargs): contrast_factors = tf.cast(transformations, dtype=images.dtype) means = tf.reduce_mean(images, axis=(1, 2), keepdims=True) images = (images - means) * contrast_factors + means images = tf.clip_by_value( images, self.value_range[0], self.value_range[1] ) return images def augment_labels(self, labels, transformations, **kwargs): return labels def augment_segmentation_masks( self, segmentation_masks, transformations, **kwargs ): return segmentation_masks def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs): return bounding_boxes def get_config(self): config = { "factor": self.factor_input, "value_range": self.value_range, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): return cls(**config)
keras-cv/keras_cv/layers/preprocessing/random_contrast.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_contrast.py", "repo_id": "keras-cv", "token_count": 1804 }
11
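A runnable sketch of the `RandomContrast` layer above, separate from the in-docstring example. It assumes `keras_cv` and TensorFlow are installed; the batch shape, factor, and value range below are illustrative choices, not values prescribed by the source.

```python
import numpy as np

import keras_cv

images = np.random.uniform(0, 255, size=(4, 64, 64, 3)).astype("float32")

# Contrast factors are sampled per image from [1 - 0.4, 1 + 0.4].
layer = keras_cv.layers.RandomContrast(value_range=(0, 255), factor=0.4)
augmented = layer(images)

# Each channel is adjusted as (x - mean) * factor + mean and then clipped to
# value_range, so the output shape matches the input shape.
assert augmented.shape == images.shape
```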
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from keras_cv import bounding_box from keras_cv.api_export import keras_cv_export from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501 VectorizedBaseImageAugmentationLayer, ) from keras_cv.utils import preprocessing as preprocessing_utils # In order to support both unbatched and batched inputs, the horizontal # and vertical axis is reverse indexed H_AXIS = -3 W_AXIS = -2 @keras_cv_export("keras_cv.layers.RandomRotation") class RandomRotation(VectorizedBaseImageAugmentationLayer): """A preprocessing layer which randomly rotates images. This layer will apply random rotations to each image, filling empty space according to `fill_mode`. Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer or floating point dtype. By default, the layer will output floats. Input shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format Output shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format Arguments: factor: a float represented as fraction of 2 Pi, or a tuple of size 2 representing lower and upper bound for rotating clockwise and counter-clockwise. A positive values means rotating counter clock-wise, while a negative value means clock-wise. When represented as a single float, this value is used for both the upper and lower bound. For instance, `factor=(-0.2, 0.3)` results in an output rotation by a random amount in the range `[-20% * 2pi, 30% * 2pi]`. `factor=0.2` results in an output rotating by a random amount in the range `[-20% * 2pi, 20% * 2pi]`. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`). - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by reflecting about the edge of the last pixel. - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by filling all values beyond the edge with the same constant value k = 0. - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by wrapping around to the opposite edge. - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the nearest pixel. interpolation: Interpolation mode. Supported values: `"nearest"`, `"bilinear"`. seed: Integer. Used to create a random seed. fill_value: a float represents the value to be filled outside the boundaries when `fill_mode="constant"`. bounding_box_format: The format of bounding boxes of input dataset. Refer https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py for more details on supported bounding box formats. segmentation_classes: an optional integer with the number of classes in the input segmentation mask. Required iff augmenting data with sparse (non one-hot) segmentation masks. Include the background class in this count (e.g. 
for segmenting dog vs background, this should be set to 2). """ def __init__( self, factor, fill_mode="reflect", interpolation="bilinear", seed=None, fill_value=0.0, bounding_box_format=None, segmentation_classes=None, **kwargs, ): super().__init__(seed=seed, **kwargs) self.factor = factor if isinstance(factor, (tuple, list)): self.lower = factor[0] self.upper = factor[1] else: self.lower = -factor self.upper = factor if self.upper < self.lower: raise ValueError( "Factor cannot have negative values, " "got {}".format(factor) ) preprocessing_utils.check_fill_mode_and_interpolation( fill_mode, interpolation ) self.fill_mode = fill_mode self.fill_value = fill_value self.interpolation = interpolation self.seed = seed self.bounding_box_format = bounding_box_format self.segmentation_classes = segmentation_classes def get_random_transformation_batch(self, batch_size, **kwargs): min_angle = self.lower * 2.0 * np.pi max_angle = self.upper * 2.0 * np.pi angles = self._random_generator.uniform( shape=[batch_size], minval=min_angle, maxval=max_angle ) return {"angles": angles} def augment_ragged_image(self, image, transformation, **kwargs): image = tf.expand_dims(image, axis=0) transformation = { "angles": tf.expand_dims(transformation["angles"], axis=0), } image = self.augment_images( images=image, transformations=transformation, **kwargs ) return tf.squeeze(image, axis=0) def augment_images(self, images, transformations, **kwargs): return self._rotate_images(images, transformations) def augment_labels(self, labels, transformations, **kwargs): return labels def augment_bounding_boxes( self, bounding_boxes, transformations, raw_images=None, **kwargs ): if self.bounding_box_format is None: raise ValueError( "`RandomRotation()` was called with bounding boxes," "but no `bounding_box_format` was specified in the constructor." "Please specify a bounding box format in the constructor. i.e." 
"`RandomRotation(bounding_box_format='xyxy')`" ) bounding_boxes = bounding_box.to_dense(bounding_boxes) bounding_boxes = bounding_box.convert_format( bounding_boxes, source=self.bounding_box_format, target="xyxy", images=raw_images, ) image_shape = tf.shape(raw_images) h = image_shape[H_AXIS] w = image_shape[W_AXIS] # origin coordinates, all the points on the image are rotated around # this point origin_x = tf.cast(w / 2, dtype=self.compute_dtype) origin_y = tf.cast(h / 2, dtype=self.compute_dtype) angles = -transformations["angles"] angles = angles[:, tf.newaxis, tf.newaxis, tf.newaxis] # calculate coordinates of all four corners of the bounding box boxes = bounding_boxes["boxes"] points = tf.stack( [ tf.stack([boxes[:, :, 0], boxes[:, :, 1]], axis=2), tf.stack([boxes[:, :, 2], boxes[:, :, 1]], axis=2), tf.stack([boxes[:, :, 2], boxes[:, :, 3]], axis=2), tf.stack([boxes[:, :, 0], boxes[:, :, 3]], axis=2), ], axis=2, ) # point_x : x coordinates of all corners of the bounding box point_xs = tf.gather(points, [0], axis=3) point_x_offsets = tf.cast((point_xs - origin_x), dtype=tf.float32) # point_y : y coordinates of all corners of the bounding box point_ys = tf.gather(points, [1], axis=3) point_y_offsets = tf.cast((point_ys - origin_y), dtype=tf.float32) # rotated bounding box coordinates # new_x : new position of x coordinates of corners of bounding box new_x = ( origin_x + tf.multiply(tf.cos(angles), point_x_offsets) - tf.multiply(tf.sin(angles), point_y_offsets) ) # new_y : new position of y coordinates of corners of bounding box new_y = ( origin_y + tf.multiply(tf.sin(angles), point_x_offsets) + tf.multiply(tf.cos(angles), point_y_offsets) ) # rotated bounding box coordinates out = tf.concat([new_x, new_y], axis=3) # find readjusted coordinates of bounding box to represent it in corners # format min_coordinates = tf.math.reduce_min(out, axis=2) max_coordinates = tf.math.reduce_max(out, axis=2) boxes = tf.concat([min_coordinates, max_coordinates], axis=2) bounding_boxes = bounding_boxes.copy() bounding_boxes["boxes"] = boxes bounding_boxes = bounding_box.clip_to_image( bounding_boxes, bounding_box_format="xyxy", images=raw_images, ) # coordinates cannot be float values, it is cast to int32 bounding_boxes = bounding_box.convert_format( bounding_boxes, source="xyxy", target=self.bounding_box_format, dtype=self.compute_dtype, images=raw_images, ) return bounding_boxes def augment_segmentation_masks( self, segmentation_masks, transformations, **kwargs ): # If segmentation_classes is specified, we have a dense segmentation # mask. We therefore one-hot encode before rotation to avoid bad # interpolation during the rotation transformation. We then make the # mask sparse again using tf.argmax. if self.segmentation_classes: one_hot_mask = tf.one_hot( tf.squeeze(tf.cast(segmentation_masks, tf.int32), axis=-1), self.segmentation_classes, ) rotated_one_hot_mask = self._rotate_images( one_hot_mask, transformations ) rotated_mask = tf.argmax(rotated_one_hot_mask, axis=-1) return tf.expand_dims(rotated_mask, axis=-1) else: if segmentation_masks.shape[-1] == 1: raise ValueError( "Segmentation masks must be one-hot encoded, or " "RandomRotate must be initialized with " "`segmentation_classes`. `segmentation_classes` was not " f"specified, and mask has shape {segmentation_masks.shape}" ) rotated_mask = self._rotate_images( segmentation_masks, transformations ) # Round because we are in one-hot encoding, and we may have # pixels with ambiguous value due to floating point math for # rotation. 
return tf.round(rotated_mask) def _rotate_images(self, images, transformations): images = preprocessing_utils.ensure_tensor(images, self.compute_dtype) original_shape = images.shape image_shape = tf.shape(images) img_hd = tf.cast(image_shape[H_AXIS], tf.float32) img_wd = tf.cast(image_shape[W_AXIS], tf.float32) angles = transformations["angles"] outputs = preprocessing_utils.transform( images, preprocessing_utils.get_rotation_matrix(angles, img_hd, img_wd), fill_mode=self.fill_mode, fill_value=self.fill_value, interpolation=self.interpolation, ) outputs.set_shape(original_shape) return outputs def get_config(self): config = { "factor": self.factor, "fill_mode": self.fill_mode, "fill_value": self.fill_value, "interpolation": self.interpolation, "bounding_box_format": self.bounding_box_format, "segmentation_classes": self.segmentation_classes, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): return cls(**config)
keras-cv/keras_cv/layers/preprocessing/random_rotation.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_rotation.py", "repo_id": "keras-cv", "token_count": 5244 }
12
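A minimal sketch of the dictionary-style call that the `RandomRotation` layer above supports for images plus bounding boxes. It assumes `keras_cv` and TensorFlow are installed; the image size, box coordinates, and rotation factor are illustrative.

```python
import numpy as np
import tensorflow as tf

import keras_cv

images = np.random.uniform(0, 255, size=(2, 128, 128, 3)).astype("float32")
bounding_boxes = {
    "boxes": tf.constant(
        [[[10.0, 10.0, 50.0, 50.0]], [[20.0, 30.0, 90.0, 100.0]]]
    ),
    "classes": tf.constant([[0.0], [1.0]]),
}

# Angles are sampled from [-0.1 * 2pi, 0.1 * 2pi]; boxes are recomputed from
# the rotated corners and clipped to the image.
layer = keras_cv.layers.RandomRotation(
    factor=0.1, bounding_box_format="xyxy"
)
outputs = layer({"images": images, "bounding_boxes": bounding_boxes})
```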
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv import bounding_box from keras_cv.api_export import keras_cv_export from keras_cv.backend import tf_ops from keras_cv.layers.preprocessing.base_image_augmentation_layer import ( BaseImageAugmentationLayer, ) from keras_cv.utils import get_interpolation H_AXIS = -3 W_AXIS = -2 supported_keys = [ "images", "labels", "targets", "bounding_boxes", "segmentation_masks", ] @keras_cv_export("keras_cv.layers.Resizing") class Resizing(BaseImageAugmentationLayer): """A preprocessing layer which resizes images. This layer resizes an image input to a target height and width. The input should be a 4D (batched) or 3D (unbatched) tensor in `"channels_last"` format. Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer or floating point dtype. By default, the layer will output floats. This layer can be called on tf.RaggedTensor batches of input images of distinct sizes, and will resize the outputs to dense tensors of uniform size. For an overview and full list of preprocessing layers, see the preprocessing [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers). Args: height: Integer, the height of the output shape. width: Integer, the width of the output shape. interpolation: String, the interpolation method, defaults to `"bilinear"`. Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`, `"lanczos3"`, `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`. crop_to_aspect_ratio: If True, resize the images without aspect ratio distortion. When the original aspect ratio differs from the target aspect ratio, the output image will be cropped to return the largest possible window in the image (of size `(height, width)`) that matches the target aspect ratio. By default, (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved. pad_to_aspect_ratio: If True, resize the images without aspect ratio distortion. When the original aspect ratio differs from the target aspect ratio, the output image will be padded to return the largest possible resize of the image (of size `(height, width)`) that matches the target aspect ratio. By default, (`pad_to_aspect_ratio=False`), aspect ratio may not be preserved. bounding_box_format: The format of bounding boxes of input dataset. Refer to https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py for more details on supported bounding box formats. 
""" def __init__( self, height, width, interpolation="bilinear", crop_to_aspect_ratio=False, pad_to_aspect_ratio=False, bounding_box_format=None, **kwargs, ): self.height = height self.width = width self.interpolation = interpolation self.crop_to_aspect_ratio = crop_to_aspect_ratio self.pad_to_aspect_ratio = pad_to_aspect_ratio self._interpolation_method = get_interpolation(interpolation) self.bounding_box_format = bounding_box_format self.force_output_dense_images = True if pad_to_aspect_ratio and crop_to_aspect_ratio: raise ValueError( "`Resizing()` expects at most one of `crop_to_aspect_ratio` or " "`pad_to_aspect_ratio` to be True." ) if not pad_to_aspect_ratio and bounding_box_format: raise ValueError( "Resizing() only supports bounding boxes when in " "`pad_to_aspect_ratio=True` mode. " "Please pass `pad_to_aspect_ratio=True`" "when processing bounding boxes with `Resizing()`" ) super().__init__(**kwargs) def compute_image_signature(self, images): return tf.TensorSpec( shape=(self.height, self.width, images.shape[-1]), dtype=self.compute_dtype, ) def _augment(self, inputs): images = inputs.get("images", None) bounding_boxes = inputs.get("bounding_boxes", None) segmentation_masks = inputs.get("segmentation_masks", None) if images is not None: images = tf.expand_dims(images, axis=0) inputs["images"] = images if bounding_boxes is not None: bounding_boxes = bounding_boxes.copy() bounding_boxes["classes"] = tf.expand_dims( bounding_boxes["classes"], axis=0 ) bounding_boxes["boxes"] = tf.expand_dims( bounding_boxes["boxes"], axis=0 ) inputs["bounding_boxes"] = bounding_boxes if segmentation_masks is not None: segmentation_masks = tf.expand_dims(segmentation_masks, axis=0) inputs["segmentation_masks"] = segmentation_masks outputs = self._batch_augment(inputs) if images is not None: images = tf.squeeze(outputs["images"], axis=0) inputs["images"] = images if bounding_boxes is not None: outputs["bounding_boxes"]["classes"] = tf.squeeze( outputs["bounding_boxes"]["classes"], axis=0 ) outputs["bounding_boxes"]["boxes"] = tf.squeeze( outputs["bounding_boxes"]["boxes"], axis=0 ) inputs["bounding_boxes"] = outputs["bounding_boxes"] if segmentation_masks is not None: segmentation_masks = tf.squeeze( outputs["segmentation_masks"], axis=0 ) inputs["segmentation_masks"] = segmentation_masks return inputs def _resize_with_distortion(self, inputs): images = inputs.get("images", None) segmentation_masks = inputs.get("segmentation_masks", None) size = [self.height, self.width] images = tf.image.resize( images, size=size, method=self._interpolation_method ) images = tf.cast(images, self.compute_dtype) if segmentation_masks is not None: segmentation_masks = tf.image.resize( segmentation_masks, size=size, method="nearest" ) inputs["images"] = images inputs["segmentation_masks"] = segmentation_masks return inputs def _resize_with_pad(self, inputs): def resize_single_with_pad_to_aspect(x): image = x.get("images", None) bounding_boxes = x.get("bounding_boxes", None) segmentation_masks = x.get("segmentation_masks", None) # images must be dense-able at this point. 
if isinstance(image, tf.RaggedTensor): image = image.to_tensor() img_size = tf.shape(image) img_height = tf.cast(img_size[H_AXIS], self.compute_dtype) img_width = tf.cast(img_size[W_AXIS], self.compute_dtype) if bounding_boxes is not None: bounding_boxes = bounding_box.to_dense(bounding_boxes) bounding_boxes = bounding_box.convert_format( bounding_boxes, image_shape=img_size, source=self.bounding_box_format, target="rel_xyxy", ) # how much we scale height by to hit target height height_scale = self.height / img_height width_scale = self.width / img_width resize_scale = tf.math.minimum(height_scale, width_scale) target_height = img_height * resize_scale target_width = img_width * resize_scale image = tf.image.resize( image, size=(target_height, target_width), method=self._interpolation_method, ) if bounding_boxes is not None: bounding_boxes = bounding_box.convert_format( bounding_boxes, images=image, source="rel_xyxy", target="xyxy", ) image = tf.image.pad_to_bounding_box( image, 0, 0, self.height, self.width ) if bounding_boxes is not None: bounding_boxes = bounding_box.clip_to_image( bounding_boxes, images=image, bounding_box_format="xyxy" ) bounding_boxes = bounding_box.convert_format( bounding_boxes, images=image, source="xyxy", target=self.bounding_box_format, ) inputs["images"] = image if bounding_boxes is not None: inputs["bounding_boxes"] = bounding_box.to_ragged( bounding_boxes ) if segmentation_masks is not None: segmentation_masks = tf.image.resize( segmentation_masks, size=(target_height, target_width), method="nearest", ) segmentation_masks = tf.image.pad_to_bounding_box( tf.cast(segmentation_masks, dtype="float32"), 0, 0, self.height, self.width, ) inputs["segmentation_masks"] = segmentation_masks return inputs size_as_shape = tf.TensorShape((self.height, self.width)) shape = size_as_shape + inputs["images"].shape[-1:] img_spec = tf.TensorSpec(shape, self.compute_dtype) fn_output_signature = {"images": img_spec} bounding_boxes = inputs.get("bounding_boxes", None) if bounding_boxes is not None: boxes_spec = self._compute_bounding_box_signature(bounding_boxes) fn_output_signature["bounding_boxes"] = boxes_spec segmentation_masks = inputs.get("segmentation_masks", None) if segmentation_masks is not None: seg_map_shape = ( size_as_shape + inputs["segmentation_masks"].shape[-1:] ) seg_map_spec = tf.TensorSpec(seg_map_shape, self.compute_dtype) fn_output_signature["segmentation_masks"] = seg_map_spec return tf.map_fn( resize_single_with_pad_to_aspect, inputs, fn_output_signature=fn_output_signature, ) def _resize_with_crop(self, inputs): images = inputs.get("images", None) bounding_boxes = inputs.get("bounding_boxes", None) segmentation_masks = inputs.get("segmentation_masks", None) if bounding_boxes is not None: raise ValueError( "Resizing(crop_to_aspect_ratio=True) does not support " "bounding box inputs. Please use `pad_to_aspect_ratio=True` " "when processing bounding boxes with Resizing()." ) inputs["images"] = images size = [self.height, self.width] # tf.image.resize will always output float32 and operate more # efficiently on float32 unless interpolation is nearest, in which case # output type matches input type. 
if self.interpolation == "nearest": input_dtype = self.compute_dtype else: input_dtype = tf.float32 def resize_with_crop_to_aspect(x, interpolation_method): if isinstance(x, tf.RaggedTensor): x = x.to_tensor() return tf_ops.smart_resize( x, size=size, interpolation=interpolation_method, ) def resize_with_crop_to_aspect_images(x): return resize_with_crop_to_aspect( x, interpolation_method=self._interpolation_method ) def resize_with_crop_to_aspect_masks(x): return resize_with_crop_to_aspect(x, interpolation_method="nearest") if isinstance(images, tf.RaggedTensor): size_as_shape = tf.TensorShape(size) shape = size_as_shape + images.shape[-1:] spec = tf.TensorSpec(shape, input_dtype) images = tf.map_fn( resize_with_crop_to_aspect_images, images, fn_output_signature=spec, ) else: images = resize_with_crop_to_aspect_images(images) inputs["images"] = images if segmentation_masks is not None: if isinstance(segmentation_masks, tf.RaggedTensor): size_as_shape = tf.TensorShape(size) shape = size_as_shape + segmentation_masks.shape[-1:] spec = tf.TensorSpec(shape, input_dtype) segmentation_masks = tf.map_fn( resize_with_crop_to_aspect_masks, segmentation_masks, fn_output_signature=spec, ) else: segmentation_masks = resize_with_crop_to_aspect_masks( segmentation_masks ) inputs["segmentation_masks"] = segmentation_masks return inputs def _check_inputs(self, inputs): for key in inputs: if key not in supported_keys: raise ValueError( "Resizing() currently only supports keys " f"[{', '.join(supported_keys)}]. " f"Key `{key}` found in inputs to `Resizing()`. " ) def _batch_augment(self, inputs): if ( inputs.get("bounding_boxes", None) is not None and self.bounding_box_format is None ): raise ValueError( "Resizing requires `bounding_box_format` to be set when " "augmenting bounding boxes, but " "`self.bounding_box_format=None`." ) if self.crop_to_aspect_ratio: return self._resize_with_crop(inputs) if self.pad_to_aspect_ratio: return self._resize_with_pad(inputs) return self._resize_with_distortion(inputs) def get_config(self): config = { "height": self.height, "width": self.width, "interpolation": self.interpolation, "crop_to_aspect_ratio": self.crop_to_aspect_ratio, "pad_to_aspect_ratio": self.pad_to_aspect_ratio, "bounding_box_format": self.bounding_box_format, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
keras-cv/keras_cv/layers/preprocessing/resizing.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/resizing.py", "repo_id": "keras-cv", "token_count": 7352 }
13
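A minimal sketch of the `pad_to_aspect_ratio` path of the `Resizing` layer above, which is also the only mode that supports bounding boxes. It assumes `keras_cv` is installed; the input and target sizes are illustrative.

```python
import numpy as np

import keras_cv

images = np.random.uniform(0, 255, size=(2, 300, 500, 3)).astype("float32")

# Resize without distortion: the image is scaled to fit inside 224x224 and
# the remaining space is padded, preserving the original aspect ratio.
layer = keras_cv.layers.Resizing(
    height=224, width=224, pad_to_aspect_ratio=True
)
resized = layer(images)  # shape (2, 224, 224, 3)
```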
# Copyright 2022 Waymo LLC. # # Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501 import numpy as np from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d from keras_cv.layers.preprocessing_3d.waymo.frustum_random_dropping_points import ( # noqa: E501 FrustumRandomDroppingPoints, ) from keras_cv.tests.test_case import TestCase POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES class FrustumRandomDroppingPointTest(TestCase): def test_augment_point_clouds_and_bounding_boxes(self): add_layer = FrustumRandomDroppingPoints( r_distance=0, theta_width=1, phi_width=1, drop_rate=0.5 ) point_clouds = np.random.random(size=(2, 50, 10)).astype("float32") bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32") inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes} outputs = add_layer(inputs) self.assertNotAllClose(inputs, outputs) def test_not_augment_drop_rate0_point_clouds_and_bounding_boxes(self): add_layer = FrustumRandomDroppingPoints( r_distance=0, theta_width=1, phi_width=1, drop_rate=0.0 ) point_clouds = np.random.random(size=(2, 50, 10)).astype("float32") bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32") inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes} outputs = add_layer(inputs) self.assertAllClose(inputs, outputs) def test_not_augment_drop_rate1_frustum_empty_point_clouds_and_bounding_boxes( # noqa: E501 self, ): add_layer = FrustumRandomDroppingPoints( r_distance=10, theta_width=0, phi_width=0, drop_rate=1.0 ) point_clouds = np.random.random(size=(2, 50, 10)).astype("float32") bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32") inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes} outputs = add_layer(inputs) self.assertAllClose(inputs, outputs) def test_drop_rate1_large_frustum_drop_all_point_clouds(self): add_layer = FrustumRandomDroppingPoints( r_distance=0, theta_width=np.pi, phi_width=np.pi, drop_rate=1.0 ) point_clouds = np.random.random(size=(2, 50, 10)).astype("float32") bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32") inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes} outputs = add_layer(inputs) self.assertAllClose(inputs[POINT_CLOUDS] * 0.0, outputs[POINT_CLOUDS]) def test_exclude_all_points(self): add_layer = FrustumRandomDroppingPoints( r_distance=0, theta_width=np.pi, phi_width=np.pi, drop_rate=1.0, exclude_classes=1, ) point_clouds = np.random.random(size=(2, 50, 10)).astype("float32") exclude_classes = np.ones(shape=(2, 50, 1)).astype("float32") point_clouds = np.concatenate([point_clouds, exclude_classes], axis=-1) bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32") inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes} outputs = add_layer(inputs) self.assertAllClose(inputs, outputs) def test_exclude_the_first_half_points(self): add_layer = FrustumRandomDroppingPoints( r_distance=0, theta_width=np.pi, phi_width=np.pi, drop_rate=1.0, exclude_classes=[1, 2], ) point_clouds = np.random.random(size=(2, 50, 10)).astype("float32") class_1 = np.ones(shape=(2, 10, 1)).astype("float32") class_2 = np.ones(shape=(2, 15, 1)).astype("float32") * 2 classes = np.concatenate( [class_1, class_2, np.zeros(shape=(2, 25, 1)).astype("float32")], axis=1, ) point_clouds = np.concatenate([point_clouds, classes], axis=-1) bounding_boxes = np.random.random(size=(2, 
10, 7)).astype("float32") inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes} outputs = add_layer(inputs) self.assertAllClose( inputs[POINT_CLOUDS][:, 25:, :] * 0.0, outputs[POINT_CLOUDS][:, 25:, :], ) self.assertAllClose( inputs[POINT_CLOUDS][:, :25, :], outputs[POINT_CLOUDS][:, :25, :] ) def test_augment_batch_point_clouds_and_bounding_boxes(self): add_layer = FrustumRandomDroppingPoints( r_distance=0, theta_width=1, phi_width=1, drop_rate=0.5 ) point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32") bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32") inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes} outputs = add_layer(inputs) self.assertNotAllClose(inputs, outputs)
keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_dropping_points_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_dropping_points_test.py", "repo_id": "keras-cv", "token_count": 2383 }
14
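A standalone sketch of the layer exercised by the test above, built from the same kind of random point clouds and bounding boxes the test uses. It assumes `keras_cv` is installed; the tensor sizes are illustrative.

```python
import numpy as np

from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.frustum_random_dropping_points import (  # noqa: E501
    FrustumRandomDroppingPoints,
)

# Drop roughly half of the points that fall inside the configured frustum.
layer = FrustumRandomDroppingPoints(
    r_distance=0, theta_width=1, phi_width=1, drop_rate=0.5
)
inputs = {
    base_augmentation_layer_3d.POINT_CLOUDS: np.random.random(
        size=(2, 50, 10)
    ).astype("float32"),
    base_augmentation_layer_3d.BOUNDING_BOXES: np.random.random(
        size=(2, 10, 7)
    ).astype("float32"),
}
outputs = layer(inputs)  # dropped points are zeroed out
```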
# Copyright 2022 Waymo LLC. # # Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501 import os import numpy as np import pytest from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d from keras_cv.layers.preprocessing_3d.waymo.random_copy_paste import ( RandomCopyPaste, ) from keras_cv.tests.test_case import TestCase POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES OBJECT_POINT_CLOUDS = base_augmentation_layer_3d.OBJECT_POINT_CLOUDS OBJECT_BOUNDING_BOXES = base_augmentation_layer_3d.OBJECT_BOUNDING_BOXES class RandomCopyPasteTest(TestCase): @pytest.mark.skipif( "TEST_CUSTOM_OPS" not in os.environ or os.environ["TEST_CUSTOM_OPS"] != "true", reason="Requires binaries compiled from source", ) def test_augment_point_clouds_and_bounding_boxes(self): add_layer = RandomCopyPaste( label_index=1, min_paste_bounding_boxes=1, max_paste_bounding_boxes=1, ) # point_clouds: 3D (multi frames) float32 Tensor with shape # [num of frames, num of points, num of point features]. # The first 5 features are [x, y, z, class, range]. point_clouds = np.array( [ [ [0, 1, 2, 3, 4], [10, 1, 2, 3, 4], [0, -1, 2, 3, 4], [100, 100, 2, 3, 4], [0, 0, 0, 0, 0], ] ] * 2 ).astype("float32") # bounding_boxes: 3D (multi frames) float32 Tensor with shape # [num of frames, num of boxes, num of box features]. # The first 8 features are [x, y, z, dx, dy, dz, phi, box class]. bounding_boxes = np.array( [ [ [0, 0, 0, 4, 4, 4, 0, 1], [20, 20, 20, 1, 1, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] ] * 2 ).astype("float32") object_point_clouds = np.array( [ [ [[0, 1, 2, 3, 4], [0, 1, 1, 3, 4]], [[100, 101, 2, 3, 4], [0, 0, 0, 0, 0]], ] ] * 2 ).astype("float32") object_bounding_boxes = np.array( [ [ [0, 0, 1, 4, 4, 4, 0, 1], [100, 100, 2, 5, 5, 5, 0, 1], ] ] * 2 ).astype("float32") inputs = { POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes, OBJECT_POINT_CLOUDS: object_point_clouds, OBJECT_BOUNDING_BOXES: object_bounding_boxes, } outputs = add_layer(inputs) # The first object bounding box [0, 0, 1, 4, 4, 4, 0, 1] overlaps with # existing bounding box [0, 0, 0, 4, 4, 4, 0, 1], thus not used. # The second object bounding box [100, 100, 2, 5, 5, 5, 0, 1] and object # point clouds [100, 101, 2, 3, 4] are pasted. 
augmented_point_clouds = np.array( [ [ [100, 101, 2, 3, 4], [0, 1, 2, 3, 4], [10, 1, 2, 3, 4], [0, -1, 2, 3, 4], [0, 0, 0, 0, 0], ] ] * 2 ).astype("float32") augmented_bounding_boxes = np.array( [ [ [100, 100, 2, 5, 5, 5, 0, 1], [0, 0, 0, 4, 4, 4, 0, 1], [20, 20, 20, 1, 1, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], ] ] * 2 ).astype("float32") self.assertAllClose( inputs[OBJECT_POINT_CLOUDS], outputs[OBJECT_POINT_CLOUDS] ) self.assertAllClose( inputs[OBJECT_BOUNDING_BOXES], outputs[OBJECT_BOUNDING_BOXES] ) self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds) self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes) @pytest.mark.skipif( "TEST_CUSTOM_OPS" not in os.environ or os.environ["TEST_CUSTOM_OPS"] != "true", reason="Requires binaries compiled from source", ) def test_augment_batch_point_clouds_and_bounding_boxes(self): add_layer = RandomCopyPaste( label_index=1, min_paste_bounding_boxes=1, max_paste_bounding_boxes=1, ) point_clouds = np.array( [ [ [ [0, 1, 2, 3, 4], [10, 1, 2, 3, 4], [0, -1, 2, 3, 4], [100, 100, 2, 3, 4], [0, 0, 0, 0, 0], ] ] * 2 ] * 3 ).astype("float32") bounding_boxes = np.array( [ [ [ [0, 0, 0, 4, 4, 4, 0, 1], [20, 20, 20, 1, 1, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] ] * 2 ] * 3 ).astype("float32") object_point_clouds = np.array( [ [ [ [[0, 1, 2, 3, 4], [0, 1, 1, 3, 4]], [[100, 101, 2, 3, 4], [0, 0, 0, 0, 0]], ] ] * 2 ] * 3 ).astype("float32") object_bounding_boxes = np.array( [ [ [ [0, 0, 1, 4, 4, 4, 0, 1], [100, 100, 2, 5, 5, 5, 0, 1], ] ] * 2 ] * 3 ).astype("float32") inputs = { POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes, OBJECT_POINT_CLOUDS: object_point_clouds, OBJECT_BOUNDING_BOXES: object_bounding_boxes, } outputs = add_layer(inputs) # The first object bounding box [0, 0, 1, 4, 4, 4, 0, 1] overlaps with # existing bounding box [0, 0, 0, 4, 4, 4, 0, 1], thus not used. # The second object bounding box [100, 100, 2, 5, 5, 5, 0, 1] and object # point clouds [100, 101, 2, 3, 4] are pasted. augmented_point_clouds = np.array( [ [ [ [100, 101, 2, 3, 4], [0, 1, 2, 3, 4], [10, 1, 2, 3, 4], [0, -1, 2, 3, 4], [0, 0, 0, 0, 0], ] ] * 2 ] * 3 ).astype("float32") augmented_bounding_boxes = np.array( [ [ [ [100, 100, 2, 5, 5, 5, 0, 1], [0, 0, 0, 4, 4, 4, 0, 1], [20, 20, 20, 1, 1, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], ] ] * 2 ] * 3 ).astype("float32") self.assertAllClose( inputs[OBJECT_POINT_CLOUDS], outputs[OBJECT_POINT_CLOUDS] ) self.assertAllClose( inputs[OBJECT_BOUNDING_BOXES], outputs[OBJECT_BOUNDING_BOXES] ) self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds) self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_copy_paste_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_copy_paste_test.py", "repo_id": "keras-cv", "token_count": 5017 }
15
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any from typing import List from typing import Mapping from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.backend.config import keras_3 @keras_cv_export("keras_cv.layers.SpatialPyramidPooling") class SpatialPyramidPooling(keras.layers.Layer): """Implements the Atrous Spatial Pyramid Pooling. References: [Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1706.05587.pdf) [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf) inp = keras.layers.Input((384, 384, 3)) backbone = keras.applications.EfficientNetB0( input_tensor=inp, include_top=False) output = backbone(inp) output = keras_cv.layers.SpatialPyramidPooling( dilation_rates=[6, 12, 18])(output) # output[4].shape = [None, 16, 16, 256] """ # noqa: E501 def __init__( self, dilation_rates: List[int], num_channels: int = 256, activation: str = "relu", dropout: float = 0.0, **kwargs, ): """Initializes an Atrous Spatial Pyramid Pooling layer. Args: dilation_rates: A `list` of integers for parallel dilated conv. Usually a sample choice of rates are [6, 12, 18]. num_channels: An `int` number of output channels, defaults to 256. activation: A `str` activation to be used, defaults to 'relu'. dropout: A `float` for the dropout rate of the final projection output after the activations and batch norm, defaults to 0.0, which means no dropout is applied to the output. **kwargs: Additional keyword arguments to be passed. """ super().__init__(**kwargs) self.dilation_rates = dilation_rates self.num_channels = num_channels self.activation = activation self.dropout = dropout # TODO(ianstenbit): Remove this once TF 2.14 is released which adds # XLA support for resizing with bilinear interpolation. if keras_3() and keras.backend.backend() == "tensorflow": self.supports_jit = False def build(self, input_shape): channels = input_shape[3] # This is the parallel networks that process the input features with # different dilation rates. The output from each channel will be merged # together and feed to the output. self.aspp_parallel_channels = [] # Channel1 with Conv2D and 1x1 kernel size. conv_sequential = keras.Sequential( [ keras.layers.Conv2D( filters=self.num_channels, kernel_size=(1, 1), use_bias=False, ), keras.layers.BatchNormalization(), keras.layers.Activation(self.activation), ] ) conv_sequential.build(input_shape) self.aspp_parallel_channels.append(conv_sequential) # Channel 2 and afterwards are based on self.dilation_rates, and each of # them will have conv2D with 3x3 kernel size. 
for dilation_rate in self.dilation_rates: conv_sequential = keras.Sequential( [ keras.layers.Conv2D( filters=self.num_channels, kernel_size=(3, 3), padding="same", dilation_rate=dilation_rate, use_bias=False, ), keras.layers.BatchNormalization(), keras.layers.Activation(self.activation), ] ) conv_sequential.build(input_shape) self.aspp_parallel_channels.append(conv_sequential) # Last channel is the global average pooling with conv2D 1x1 kernel. pool_sequential = keras.Sequential( [ keras.layers.GlobalAveragePooling2D(), keras.layers.Reshape((1, 1, channels)), keras.layers.Conv2D( filters=self.num_channels, kernel_size=(1, 1), use_bias=False, ), keras.layers.BatchNormalization(), keras.layers.Activation(self.activation), ] ) pool_sequential.build(input_shape) self.aspp_parallel_channels.append(pool_sequential) # Final projection layers projection = keras.Sequential( [ keras.layers.Conv2D( filters=self.num_channels, kernel_size=(1, 1), use_bias=False, ), keras.layers.BatchNormalization(), keras.layers.Activation(self.activation), keras.layers.Dropout(rate=self.dropout), ], ) projection_input_channels = ( 2 + len(self.dilation_rates) ) * self.num_channels projection.build(tuple(input_shape[:-1]) + (projection_input_channels,)) self.projection = projection def call(self, inputs, training=None): """Calls the Atrous Spatial Pyramid Pooling layer on an input. Args: inputs: A tensor of shape [batch, height, width, channels] Returns: A tensor of shape [batch, height, width, num_channels] """ result = [] for channel in self.aspp_parallel_channels: temp = ops.cast(channel(inputs, training=training), inputs.dtype) result.append(temp) image_shape = ops.shape(inputs) height, width = image_shape[1], image_shape[2] result[-1] = keras.layers.Resizing( height, width, interpolation="bilinear", )(result[-1]) result = ops.concatenate(result, axis=-1) result = self.projection(result, training=training) return result def compute_output_shape(self, input_shape): return tuple(input_shape[:-1]) + (self.num_channels,) def get_config(self) -> Mapping[str, Any]: config = { "dilation_rates": self.dilation_rates, "num_channels": self.num_channels, "activation": self.activation, "dropout": self.dropout, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
keras-cv/keras_cv/layers/spatial_pyramid.py/0
{ "file_path": "keras-cv/keras_cv/layers/spatial_pyramid.py", "repo_id": "keras-cv", "token_count": 3324 }
16
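A minimal functional-model sketch of the `SpatialPyramidPooling` layer above, in the spirit of the in-docstring example but applied directly to a stand-in feature map. The feature-map shape and dropout rate are illustrative assumptions.

```python
from keras_cv.backend import keras

import keras_cv

# A stand-in for a backbone feature map with 512 channels.
features = keras.layers.Input((64, 64, 512))

aspp = keras_cv.layers.SpatialPyramidPooling(
    dilation_rates=[6, 12, 18], num_channels=256, dropout=0.1
)
# Spatial size is preserved; channels are projected to num_channels,
# i.e. the output shape is (None, 64, 64, 256).
outputs = aspp(features)
model = keras.Model(features, outputs)
```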
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.losses.giou_loss import GIoULoss from keras_cv.tests.test_case import TestCase class GIoUTest(TestCase): def test_output_shape(self): y_true = tf.random.uniform( shape=(2, 2, 4), minval=0, maxval=10, dtype=tf.int32 ) y_pred = tf.random.uniform( shape=(2, 2, 4), minval=0, maxval=20, dtype=tf.int32 ) giou_loss = GIoULoss(bounding_box_format="xywh") self.assertAllEqual(giou_loss(y_true, y_pred).shape, ()) def test_output_shape_reduction_none(self): y_true = tf.random.uniform( shape=(2, 2, 4), minval=0, maxval=10, dtype=tf.int32 ) y_pred = tf.random.uniform( shape=(2, 2, 4), minval=0, maxval=20, dtype=tf.int32 ) giou_loss = GIoULoss(bounding_box_format="xywh", reduction="none") self.assertAllEqual( giou_loss(y_true, y_pred).shape, [ 2, ], ) def test_output_shape_relative_formats(self): y_true = [ [0.0, 0.0, 0.1, 0.1], [0.0, 0.0, 0.2, 0.3], [0.4, 0.5, 0.5, 0.6], [0.2, 0.2, 0.3, 0.3], ] y_pred = [ [0.0, 0.0, 0.5, 0.6], [0.0, 0.0, 0.7, 0.3], [0.4, 0.5, 0.5, 0.6], [0.2, 0.1, 0.3, 0.3], ] giou_loss = GIoULoss(bounding_box_format="rel_xyxy") self.assertAllEqual(giou_loss(y_true, y_pred).shape, ()) def test_output_value(self): y_true = [ [0, 0, 1, 1], [0, 0, 2, 3], [4, 5, 3, 6], [2, 2, 3, 3], ] y_pred = [ [0, 0, 5, 6], [0, 0, 7, 3], [4, 5, 5, 6], [2, 1, 3, 3], ] iou_loss = GIoULoss(bounding_box_format="xywh") # expected value for these values is 0.6452381 self.assertAllClose(iou_loss(y_true, y_pred), 0.6452381)
keras-cv/keras_cv/losses/giou_loss_test.py/0
{ "file_path": "keras-cv/keras_cv/losses/giou_loss_test.py", "repo_id": "keras-cv", "token_count": 1347 }
17
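A standalone sketch of the `GIoULoss` usage that the tests above exercise. The boxes are illustrative and use the `xywh` format, matching the tests.

```python
import numpy as np

from keras_cv.losses.giou_loss import GIoULoss

# Shapes are (batch, num_boxes, 4) in "xywh" format.
y_true = np.array([[[0, 0, 1, 1], [0, 0, 2, 3]]], dtype="float32")
y_pred = np.array([[[0, 0, 5, 6], [0, 0, 7, 3]]], dtype="float32")

giou = GIoULoss(bounding_box_format="xywh")
# Scalar loss by default; pass reduction="none" for one value per sample.
loss = giou(y_true, y_pred)
```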
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import numpy as np import tensorflow as tf from keras_cv import bounding_box from keras_cv.metrics import BoxCOCOMetrics from keras_cv.tests.test_case import TestCase SAMPLE_FILE = ( os.path.dirname(os.path.abspath(__file__)) + "/test_data/sample_boxes.npz" ) def load_samples(fname): npzfile = np.load(fname) y_true = npzfile["arr_0"].astype(np.float32) y_pred = npzfile["arr_1"].astype(np.float32) y_true = { "boxes": y_true[:, :, :4], "classes": y_true[:, :, 4], } y_pred = { "boxes": y_pred[:, :, :4], "classes": y_pred[:, :, 4], "confidence": y_pred[:, :, 5], } y_true = bounding_box.convert_format(y_true, source="xywh", target="xyxy") y_pred = bounding_box.convert_format(y_pred, source="xywh", target="xyxy") categories = set(int(x) for x in y_true["classes"].flatten()) categories = [x for x in categories if x != -1] return y_true, y_pred, categories golden_metrics = { "MaP": 0.61690974, "MaP@[IoU=50]": 1.0, "MaP@[IoU=75]": 0.70687747, "MaP@[area=small]": 0.6041764, "MaP@[area=medium]": 0.6262922, "MaP@[area=large]": 0.61016285, "Recall@[max_detections=1]": 0.47804594, "Recall@[max_detections=10]": 0.6451851, "Recall@[max_detections=100]": 0.6484465, "Recall@[area=small]": 0.62842655, "Recall@[area=medium]": 0.65336424, "Recall@[area=large]": 0.6405466, } class BoxCOCOMetricsTest(TestCase): def test_coco_metric_suite_returns_all_coco_metrics(self): suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=1) y_true, y_pred, categories = load_samples(SAMPLE_FILE) suite.update_state(y_true, y_pred) metrics = suite.result() for metric_name, metric_value in metrics.items(): self.assertEqual(metric_value, golden_metrics[metric_name]) def test_coco_metric_suite_evaluate_freq(self): suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=2) y_true, y_pred, categories = load_samples(SAMPLE_FILE) suite.update_state(y_true, y_pred) metrics = suite.result() self.assertAllEqual(metrics, {key: 0 for key in golden_metrics}) suite.update_state(y_true, y_pred) metrics = suite.result() # for metric in metrics: # The metrics do not match golden metrics because two batches were # passed which actually modifies the final area under curve value. 
        for metric in metrics:
self.assertNotEqual(metrics[metric], 0.0) def test_coco_metric_graph_mode(self): suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=1) y_true, y_pred, categories = load_samples(SAMPLE_FILE) @tf.function() def update_state(y_true, y_pred): suite.update_state(y_true, y_pred) @tf.function() def result(): return suite.result() metrics = result() self.assertAllEqual(metrics, {key: 0 for key in golden_metrics}) update_state(y_true, y_pred) metrics = result() for metric in metrics: self.assertNotEqual(metrics[metric], 0.0) def test_coco_metric_suite_force_eval(self): suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=512) y_true, y_pred, categories = load_samples(SAMPLE_FILE) suite.update_state(y_true, y_pred) metrics = suite.result() self.assertAllEqual(metrics, {key: 0 for key in golden_metrics}) suite.update_state(y_true, y_pred) metrics = suite.result(force=True) for metric in metrics: # The metrics do not match golden metrics because two batches were # passed which actually modifies the final area under curve value. self.assertNotEqual(metrics[metric], 0.0) def test_name_parameter(self): suite = BoxCOCOMetrics( bounding_box_format="xyxy", evaluate_freq=1, name="coco_metrics" ) y_true, y_pred, categories = load_samples(SAMPLE_FILE) suite.update_state(y_true, y_pred) metrics = suite.result() for metric in golden_metrics: self.assertAlmostEqual( metrics["coco_metrics_" + metric], golden_metrics[metric] ) def test_coco_metric_suite_ragged_prediction(self): suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=1) ragged_bounding_boxes = { # shape: (2, (2, 1), 4) "boxes": tf.ragged.constant( [ [[10, 10, 20, 20], [100, 100, 150, 150]], # small, medium [[200, 200, 400, 400]], # large ], ragged_rank=1, dtype=tf.float32, ), "classes": tf.ragged.constant( [[0, 1], [2]], ragged_rank=1, dtype=tf.float32, ), "confidence": tf.ragged.constant( [[0.7, 0.8], [0.9]], ragged_rank=1, dtype=tf.float32, ), } different_ragged_bounding_boxes = { # shape: (2, (2, 3), 4) "boxes": tf.ragged.constant( [ [[10, 10, 25, 25], [100, 105, 155, 155]], [[200, 200, 450, 450], [1, 1, 5, 5], [50, 50, 300, 300]], ], ragged_rank=1, dtype=tf.float32, ), "classes": tf.ragged.constant( [[0, 1], [2, 3, 3]], ragged_rank=1, dtype=tf.float32, ), "confidence": tf.ragged.constant( [[0.7, 0.8], [0.9, 0.7, 0.7]], ragged_rank=1, dtype=tf.float32, ), } suite.update_state( ragged_bounding_boxes, bounding_box.to_dense(ragged_bounding_boxes), ) metrics = suite.result() for metric in metrics: # The metrics will be all 1.0 because the predictions and ground # truth values are identical. self.assertEqual(metrics[metric], 1.0) suite.reset_state() suite.update_state( ragged_bounding_boxes, bounding_box.to_dense(different_ragged_bounding_boxes), ) metrics = suite.result() for metric in metrics: # The metrics will not be 1.0 because the predictions and ground # truth values are completely different. 
self.assertNotEqual(metrics[metric], 1.0) def test_coco_metric_suite_ragged_labels(self): suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=1) ragged_bounding_boxes = { # shape: (2, (2, 1), 4) "boxes": tf.ragged.constant( [ [[10, 10, 20, 20], [100, 100, 150, 150]], # small, medium [[200, 200, 400, 400]], # large ], ragged_rank=1, dtype=tf.float32, ), "classes": tf.ragged.constant( [[0, 1], [2]], ragged_rank=1, dtype=tf.float32, ), "confidence": tf.ragged.constant( [[0.7, 0.8], [0.9]], ragged_rank=1, dtype=tf.float32, ), } different_ragged_bounding_boxes = { # shape: (2, (2, 3), 4) "boxes": tf.ragged.constant( [ [[10, 10, 25, 25], [100, 105, 155, 155]], [[200, 200, 450, 450], [1, 1, 5, 5], [50, 50, 300, 300]], ], ragged_rank=1, dtype=tf.float32, ), "classes": tf.ragged.constant( [[0, 1], [2, 3, 3]], ragged_rank=1, dtype=tf.float32, ), "confidence": tf.ragged.constant( [[0.7, 0.8], [0.9, 0.7, 0.7]], ragged_rank=1, dtype=tf.float32, ), } suite.update_state(ragged_bounding_boxes, ragged_bounding_boxes) metrics = suite.result() for metric in metrics: # The metrics will be all 1.0 because the predictions and ground # truth values are identical. self.assertEqual(metrics[metric], 1.0) suite.reset_state() suite.update_state( ragged_bounding_boxes, different_ragged_bounding_boxes ) metrics = suite.result() for metric in metrics: # The metrics will not be 1.0 because the predictions and ground # truth values are completely different. self.assertNotEqual(metrics[metric], 1.0)
keras-cv/keras_cv/metrics/object_detection/box_coco_metrics_test.py/0
{ "file_path": "keras-cv/keras_cv/metrics/object_detection/box_coco_metrics_test.py", "repo_id": "keras-cv", "token_count": 4825 }
18
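A minimal sketch of the update/result workflow that the tests above exercise. It assumes `keras_cv` is installed; the single ground-truth and predicted box are illustrative.

```python
import numpy as np

from keras_cv.metrics import BoxCOCOMetrics

y_true = {
    "boxes": np.array([[[10, 10, 50, 50]]], dtype="float32"),
    "classes": np.array([[0]], dtype="float32"),
}
y_pred = {
    "boxes": np.array([[[12, 12, 48, 48]]], dtype="float32"),
    "classes": np.array([[0]], dtype="float32"),
    "confidence": np.array([[0.9]], dtype="float32"),
}

# evaluate_freq=1 recomputes the COCO metrics on every result() call.
metrics = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=1)
metrics.update_state(y_true, y_pred)
results = metrics.result()  # dict with "MaP", "MaP@[IoU=50]", recalls, ...
```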
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DenseNet model preset configurations.""" backbone_presets_no_weights = { "densenet121": { "metadata": { "description": "DenseNet model with 121 layers.", }, "kaggle_handle": "kaggle://keras/densenet/keras/densenet121/2", }, "densenet169": { "metadata": { "description": "DenseNet model with 169 layers.", }, "kaggle_handle": "kaggle://keras/densenet/keras/densenet169/2", }, "densenet201": { "metadata": { "description": "DenseNet model with 201 layers.", }, "kaggle_handle": "kaggle://keras/densenet/keras/densenet201/2", }, } backbone_presets_with_weights = { "densenet121_imagenet": { "metadata": { "description": ( "DenseNet model with 121 layers. Trained on Imagenet 2012 " "classification task." ), }, "kaggle_handle": "kaggle://keras/densenet/keras/densenet121_imagenet/2", }, "densenet169_imagenet": { "metadata": { "description": ( "DenseNet model with 169 layers. Trained on Imagenet 2012 " "classification task." ), }, "kaggle_handle": "kaggle://keras/densenet/keras/densenet169_imagenet/2", }, "densenet201_imagenet": { "metadata": { "description": ( "DenseNet model with 201 layers. Trained on Imagenet 2012 " "classification task." ), }, "kaggle_handle": "kaggle://keras/densenet/keras/densenet201_imagenet/2", }, } backbone_presets = { **backbone_presets_no_weights, **backbone_presets_with_weights, }
keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_presets.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_presets.py", "repo_id": "keras-cv", "token_count": 1022 }
19
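The preset names above are consumed through `from_preset` on the DenseNet backbone. A minimal sketch, assuming `keras_cv` is installed and the preset configuration (and weights, if requested) can be downloaded from the listed Kaggle handles.

```python
import numpy as np

import keras_cv

# Architecture only, randomly initialized:
backbone = keras_cv.models.DenseNetBackbone.from_preset("densenet121")

# Or with ImageNet-pretrained weights:
# backbone = keras_cv.models.DenseNetBackbone.from_preset(
#     "densenet121_imagenet"
# )

features = backbone(np.ones((1, 224, 224, 3), dtype="float32"))
```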
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_cv.api_export import keras_cv_export from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone import ( EfficientNetV2Backbone, ) from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone_presets import ( # noqa: E501 backbone_presets, ) from keras_cv.utils.python_utils import classproperty ALIAS_DOCSTRING = """Instantiates the {name} architecture. Reference: - [EfficientNetV2: Smaller Models and Faster Training](https://arxiv.org/abs/2104.00298) (ICML 2021) Args: include_rescaling: bool, whether to rescale the inputs. If set to `True`, inputs will be passed through a `Rescaling(1/255.0)` layer. input_shape: optional shape tuple, defaults to (None, None, 3). input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. """ # noqa: E501 @keras_cv_export("keras_cv.models.EfficientNetV2SBackbone") class EfficientNetV2SBackbone(EfficientNetV2Backbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return EfficientNetV2Backbone.from_preset("efficientnetv2_s", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "efficientnetv2_s_imagenet": copy.deepcopy( backbone_presets["efficientnetv2_s_imagenet"] ), } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets @keras_cv_export("keras_cv.models.EfficientNetV2MBackbone") class EfficientNetV2MBackbone(EfficientNetV2Backbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return EfficientNetV2Backbone.from_preset("efficientnetv2_m", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return {} @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return {} @keras_cv_export("keras_cv.models.EfficientNetV2LBackbone") class EfficientNetV2LBackbone(EfficientNetV2Backbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return EfficientNetV2Backbone.from_preset("efficientnetv2_l", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return {} @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return {} 
@keras_cv_export("keras_cv.models.EfficientNetV2B0Backbone") class EfficientNetV2B0Backbone(EfficientNetV2Backbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return EfficientNetV2Backbone.from_preset("efficientnetv2_b0", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "efficientnetv2_b0_imagenet": copy.deepcopy( backbone_presets["efficientnetv2_b0_imagenet"] ), } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets @keras_cv_export("keras_cv.models.EfficientNetV2B1Backbone") class EfficientNetV2B1Backbone(EfficientNetV2Backbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return EfficientNetV2Backbone.from_preset("efficientnetv2_b1", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "efficientnetv2_b1_imagenet": copy.deepcopy( backbone_presets["efficientnetv2_b1_imagenet"] ), } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets @keras_cv_export("keras_cv.models.EfficientNetV2B2Backbone") class EfficientNetV2B2Backbone(EfficientNetV2Backbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return EfficientNetV2Backbone.from_preset("efficientnetv2_b2", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "efficientnetv2_b2_imagenet": copy.deepcopy( backbone_presets["efficientnetv2_b2_imagenet"] ), } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets @keras_cv_export("keras_cv.models.EfficientNetV2B3Backbone") class EfficientNetV2B3Backbone(EfficientNetV2Backbone): def __new__( cls, include_rescaling=True, input_shape=(None, None, 3), input_tensor=None, **kwargs, ): # Pack args in kwargs kwargs.update( { "include_rescaling": include_rescaling, "input_shape": input_shape, "input_tensor": input_tensor, } ) return EfficientNetV2Backbone.from_preset("efficientnetv2_b3", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return {} @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return {} setattr( EfficientNetV2SBackbone, "__doc__", ALIAS_DOCSTRING.format(name="EfficientNetV2S"), ) setattr( EfficientNetV2MBackbone, "__doc__", ALIAS_DOCSTRING.format(name="EfficientNetV2M"), ) setattr( EfficientNetV2LBackbone, "__doc__", ALIAS_DOCSTRING.format(name="EfficientNetV2L"), ) setattr( EfficientNetV2B0Backbone, "__doc__", ALIAS_DOCSTRING.format(name="EfficientNetV2B0"), ) setattr( EfficientNetV2B1Backbone, "__doc__", ALIAS_DOCSTRING.format(name="EfficientNetV2B1"), ) setattr( EfficientNetV2B2Backbone, "__doc__", ALIAS_DOCSTRING.format(name="EfficientNetV2B2"), ) setattr( 
EfficientNetV2B3Backbone, "__doc__", ALIAS_DOCSTRING.format(name="EfficientNetV2B3"), )
keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_aliases.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_aliases.py", "repo_id": "keras-cv", "token_count": 4141 }
20
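A minimal sketch of instantiating one of the aliases above. It assumes `keras_cv` is installed and that the preset configuration (and, for the commented variant, ImageNet weights) can be downloaded; the input size is illustrative.

```python
import numpy as np

import keras_cv

# Randomly initialized EfficientNetV2-S backbone via the alias class.
backbone = keras_cv.models.EfficientNetV2SBackbone(include_rescaling=True)

# Equivalent preset-based construction with ImageNet weights:
# backbone = keras_cv.models.EfficientNetV2SBackbone.from_preset(
#     "efficientnetv2_s_imagenet"
# )

features = backbone(np.ones((1, 224, 224, 3), dtype="float32"))
```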
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import numpy as np import pytest from absl.testing import parameterized from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_aliases import ( MobileNetV3SmallBackbone, ) from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone import ( MobileNetV3Backbone, ) from keras_cv.tests.test_case import TestCase from keras_cv.utils.train import get_feature_extractor class MobileNetV3BackboneTest(TestCase): def setUp(self): self.input_batch = np.ones(shape=(2, 224, 224, 3)) def test_valid_call(self): model = MobileNetV3SmallBackbone( include_rescaling=False, ) model(self.input_batch) def test_valid_call_with_rescaling(self): model = MobileNetV3SmallBackbone( include_rescaling=True, ) model(self.input_batch) @pytest.mark.large # Saving is slow, so mark these large. def test_saved_model(self): model = MobileNetV3SmallBackbone() model_output = model(self.input_batch) save_path = os.path.join( self.get_temp_dir(), "mobilenet_v3_backbone.keras" ) model.save(save_path) restored_model = keras.models.load_model(save_path) # Check we got the real object back. self.assertIsInstance(restored_model, MobileNetV3Backbone) # Check that output matches. restored_output = restored_model(self.input_batch) self.assertAllClose( ops.convert_to_numpy(model_output), ops.convert_to_numpy(restored_output), ) def test_feature_pyramid_inputs(self): model = MobileNetV3SmallBackbone() backbone_model = get_feature_extractor( model, model.pyramid_level_inputs.values(), model.pyramid_level_inputs.keys(), ) input_size = 256 inputs = keras.Input(shape=[input_size, input_size, 3]) outputs = backbone_model(inputs) levels = ["P1", "P2", "P3", "P4", "P5"] self.assertEquals(list(outputs.keys()), levels) self.assertEquals( outputs["P1"].shape, (None, input_size // 2**1, input_size // 2**1, 16), ) self.assertEquals( outputs["P2"].shape, (None, input_size // 2**2, input_size // 2**2, 16), ) self.assertEquals( outputs["P3"].shape, (None, input_size // 2**3, input_size // 2**3, 24), ) self.assertEquals( outputs["P4"].shape, (None, input_size // 2**4, input_size // 2**4, 48), ) self.assertEquals( outputs["P5"].shape, (None, input_size // 2**5, input_size // 2**5, 96), ) @parameterized.named_parameters( ("one_channel", 1), ("four_channels", 4), ) def test_application_variable_input_channels(self, num_channels): model = MobileNetV3SmallBackbone( input_shape=(None, None, num_channels), include_rescaling=False, ) self.assertEqual(model.output_shape, (None, None, None, 576))
keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_test.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_test.py", "repo_id": "keras-cv", "token_count": 1656 }
21
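A short sketch of the arithmetic behind the shape assertions in `test_feature_pyramid_inputs` above: feature level "Pn" is downsampled by a stride of `2**n` relative to the input, and the channel counts are the expected MobileNetV3Small values from the test.

```python
# Expected pyramid output shapes for a 256x256 input, mirroring the test.
input_size = 256
expected_channels = {"P1": 16, "P2": 16, "P3": 24, "P4": 48, "P5": 96}

for level, channels in expected_channels.items():
    stride = 2 ** int(level[1:])          # P1 -> 2, P2 -> 4, ..., P5 -> 32
    spatial = input_size // stride        # 128, 64, 32, 16, 8
    print(level, (None, spatial, spatial, channels))
```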
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import numpy as np import pytest from keras_cv.backend import keras from keras_cv.models import VGG16Backbone from keras_cv.tests.test_case import TestCase class VGG16BackboneTest(TestCase): def setUp(self): self.img_input = np.ones((2, 224, 224, 3), dtype="float32") def test_valid_call(self): model = VGG16Backbone( input_shape=(224, 224, 3), include_top=False, include_rescaling=False, pooling="avg", ) model(self.img_input) def test_valid_call_with_rescaling(self): model = VGG16Backbone( input_shape=(224, 224, 3), include_top=False, include_rescaling=True, pooling="avg", ) model(self.img_input) def test_valid_call_with_top(self): model = VGG16Backbone( input_shape=(224, 224, 3), include_top=True, include_rescaling=False, num_classes=2, ) model(self.img_input) @pytest.mark.large def test_saved_model(self): model = VGG16Backbone( input_shape=(224, 224, 3), include_top=False, include_rescaling=False, num_classes=2, pooling="avg", ) model_output = model(self.img_input) save_path = os.path.join(self.get_temp_dir(), "vgg16.keras") model.save(save_path) restored_model = keras.models.load_model(save_path) # Check the restored model is instance of VGG16Backbone self.assertIsInstance(restored_model, VGG16Backbone) # Check if the restored model gives the same output restored_model_output = restored_model(self.img_input) self.assertAllClose(model_output, restored_model_output)
keras-cv/keras_cv/models/backbones/vgg16/vgg16_backbone_test.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/vgg16/vgg16_backbone_test.py", "repo_id": "keras-cv", "token_count": 1023 }
22
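A minimal sketch of the save/reload round-trip that `test_saved_model` above performs, using a tiny stand-in model instead of `VGG16Backbone` so it runs quickly and without building a full backbone; the file name is arbitrary.

```python
import numpy as np

from keras_cv.backend import keras
from keras_cv.backend import ops

# Tiny stand-in model; calling it once builds the weights.
model = keras.Sequential([keras.layers.Dense(4, activation="relu")])
x = np.ones((2, 8), dtype="float32")
original_output = model(x)

save_path = "tiny_model.keras"  # the `.keras` extension selects the v3 format
model.save(save_path)
restored = keras.models.load_model(save_path)

# The restored model should reproduce the original outputs.
np.testing.assert_allclose(
    ops.convert_to_numpy(original_output),
    ops.convert_to_numpy(restored(x)),
    rtol=1e-6,
)
```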
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.models.feature_extractor.clip.clip_image_model import ( CLIPImageEncoder, ) from keras_cv.models.feature_extractor.clip.clip_presets import ( # noqa: E501 clip_presets, ) from keras_cv.models.feature_extractor.clip.clip_text_model import ( CLIPTextEncoder, ) from keras_cv.models.task import Task from keras_cv.utils.python_utils import classproperty try: import keras_nlp except ImportError: keras_nlp = None @keras_cv_export(["keras_cv.models.CLIP"]) class CLIP(Task): """ CLIP implements the Contrastive Language-Image Pretraining (CLIP) architecture, which enables joint learning of visual and textual representations for various downstream tasks. The default base model architecture is clip-vit-base-patch32. Args: embed_dim (int): The dimensionality of the joint embedding space for images and texts. image_resolution (int): The resolution of the input images (both height and width). vision_layers (int): The number of layers in the vision (image) encoder. vision_width (int): The width of the hidden layers in the vision encoder. vision_patch_size (int): The size of each square patch in the input images. context_length (int): The maximum length of the contextualized text sequences. vocab_size (int): The size of the vocabulary for tokenization. transformer_width (int): The width of the hidden layers in the transformer-based text encoder. transformer_heads (int): The number of attention heads in the transformer-based text encoder. transformer_layers (int): The number of layers in the transformer-based text encoder. """ def __init__( self, embed_dim=512, image_resolution=224, vision_layers=12, vision_width=768, vision_patch_size=32, context_length=77, vocab_size=49408, transformer_width=768, transformer_heads=8, transformer_layers=12, **kwargs, ): super().__init__(**kwargs) if keras_nlp is None: raise ValueError( "ClipTokenizer requires keras-nlp. 
Please install " "using pip `pip install -U keras-nlp && pip install -U keras`" ) self.embed_dim = embed_dim self.image_resolution = image_resolution self.vision_layers = vision_layers self.vision_width = vision_width self.vision_patch_size = vision_patch_size self.context_length = context_length self.vocab_size = vocab_size self.transformer_width = transformer_width self.transformer_heads = transformer_heads self.transformer_layers = transformer_layers vision_heads = self.vision_width // 64 self.image_encoder = CLIPImageEncoder( input_resolution=self.image_resolution, patch_size=self.vision_patch_size, width=self.vision_width, num_layers=self.vision_layers, heads=vision_heads, output_dim=self.embed_dim, name="image_encoder", ) self.text_encoder = CLIPTextEncoder( transformer_width=self.transformer_width, transformer_layers=self.transformer_layers, transformer_heads=self.transformer_heads, vocab_size=self.vocab_size, embed_dim=self.embed_dim, context_length=self.context_length, name="text_encoder", ) self.logit_scale = keras.Variable( ops.ones([]) * ops.log(1 / 0.07), name="logit_scale" ) self.image_embeddings = None self.text_embeddings = None def build(self, input_shape): super().build(input_shape) self.text_encoder.build([None, self.context_length]) self.image_encoder.build( [None, self.image_resolution, self.image_resolution, 3] ) def encode_images(self, image): return self.image_encoder(image) def encode_text(self, text, attention_mask=None): return self.text_encoder(text, attention_mask=attention_mask) def call(self, image, text, attention_mask=None): self.image_embeddings = self.encode_images(image) self.text_embeddings = self.encode_text( text, attention_mask=attention_mask ) normalize_image_features = ops.sqrt( ops.sum(ops.power(self.image_embeddings, 2), keepdims=True) ) normalize_text_features = ops.sqrt( ops.sum(ops.power(self.text_embeddings, 2), keepdims=True) ) self.image_embeddings = self.image_embeddings / normalize_image_features self.text_embeddings = self.text_embeddings / normalize_text_features logit_scale = ops.exp(self.logit_scale) logits_per_image = ( ops.matmul( self.image_embeddings, ops.transpose(self.text_embeddings), ) * logit_scale ) logits_per_text = ops.transpose(logits_per_image) return logits_per_image, logits_per_text @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return copy.deepcopy({**clip_presets}) @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return copy.deepcopy({**clip_presets}) def get_config(self): config = super().get_config() config.update( { "embed_dim": self.embed_dim, "image_resolution": self.image_resolution, "vision_layers": self.vision_layers, "vision_width": self.vision_width, "vision_patch_size": self.vision_patch_size, "context_length": self.context_length, "vocab_size": self.vocab_size, "transformer_width": self.transformer_width, "transformer_heads": self.transformer_heads, "transformer_layers": self.transformer_layers, } ) return config
keras-cv/keras_cv/models/feature_extractor/clip/clip_model.py/0
{ "file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_model.py", "repo_id": "keras-cv", "token_count": 2998 }
23
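A NumPy sketch of the similarity computation in `CLIP.call` above, under the usual CLIP convention of L2-normalizing each embedding row before taking the scaled dot product. The random embeddings stand in for the image and text encoder outputs.

```python
import numpy as np

rng = np.random.default_rng(0)
image_embeddings = rng.normal(size=(4, 512)).astype("float32")
text_embeddings = rng.normal(size=(4, 512)).astype("float32")

# Normalize each embedding to unit length (per row).
image_embeddings /= np.linalg.norm(image_embeddings, axis=-1, keepdims=True)
text_embeddings /= np.linalg.norm(text_embeddings, axis=-1, keepdims=True)

# logit_scale is stored as log(1 / 0.07) and exponentiated at call time.
logit_scale = np.exp(np.log(1.0 / 0.07))  # ~14.29

logits_per_image = logit_scale * image_embeddings @ text_embeddings.T  # (4, 4)
logits_per_text = logits_per_image.T

print(logits_per_image.shape, logits_per_text.shape)
```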
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Integration tests for KerasCV models.""" import os import pytest import tensorflow as tf from tensorflow import keras from tensorflow.keras import backend class ModelsTest: def assertShapeEqual(self, shape1, shape2): self.assertEqual(tf.TensorShape(shape1), tf.TensorShape(shape2)) @pytest.fixture(autouse=True) def cleanup_global_session(self): # Code before yield runs before the test yield keras.backend.clear_session() def _test_application_base(self, app, _, args): # Can be instantiated with default arguments model = app( include_top=True, num_classes=10, include_rescaling=False, **args ) # Can be serialized and deserialized config = model.get_config() reconstructed_model = model.__class__.from_config(config) self.assertEqual(len(model.weights), len(reconstructed_model.weights)) # There is no rescaling layer bcause include_rescaling=False with self.assertRaises(ValueError): model.get_layer(name="rescaling") def _test_application_with_rescaling(self, app, last_dim, args): model = app(include_rescaling=True, include_top=False, **args) self.assertIsNotNone(model.get_layer(name="rescaling")) def _test_application_pooling(self, app, last_dim, args): model = app( include_rescaling=False, include_top=False, pooling="avg", **args ) self.assertShapeEqual(model.output_shape, (None, last_dim)) def _test_application_variable_input_channels(self, app, last_dim, args): # Make a local copy of args because we modify them in the test args = dict(args) input_shape = (None, None, 3) # Avoid passing this parameter twice to the app function if "input_shape" in args: input_shape = args["input_shape"] del args["input_shape"] single_channel_input_shape = (input_shape[0], input_shape[1], 1) model = app( include_rescaling=False, include_top=False, input_shape=single_channel_input_shape, **args ) output_shape = model.output_shape if "Mixer" not in app.__name__ and "ViT" not in app.__name__: self.assertShapeEqual(output_shape, (None, None, None, last_dim)) elif "MixerB16" in app.__name__ or "MixerL16" in app.__name__: num_patches = 196 self.assertShapeEqual(output_shape, (None, num_patches, last_dim)) elif "MixerB32" in app.__name__: num_patches = 49 self.assertShapeEqual(output_shape, (None, num_patches, last_dim)) elif ( "ViTTiny16" in app.__name__ or "ViTS16" in app.__name__ or "ViTB16" in app.__name__ or "ViTL16" in app.__name__ or "ViTH16" in app.__name__ ): num_patches = 197 self.assertShapeEqual(output_shape, (None, num_patches, last_dim)) elif ( "ViTTiny32" in app.__name__ or "ViTS32" in app.__name__ or "ViTB32" in app.__name__ or "ViTL32" in app.__name__ or "ViTH32" in app.__name__ ): num_patches = 50 self.assertShapeEqual(output_shape, (None, num_patches, last_dim)) backend.clear_session() four_channel_input_shape = (input_shape[0], input_shape[1], 4) model = app( include_rescaling=False, include_top=False, input_shape=four_channel_input_shape, **args ) output_shape = model.output_shape if "Mixer" not in app.__name__ and "ViT" not in 
app.__name__: self.assertShapeEqual(output_shape, (None, None, None, last_dim)) elif "MixerB16" in app.__name__ or "MixerL16" in app.__name__: num_patches = 196 self.assertShapeEqual(output_shape, (None, num_patches, last_dim)) elif "MixerB32" in app.__name__: num_patches = 49 self.assertShapeEqual(output_shape, (None, num_patches, last_dim)) elif ( "ViTTiny16" in app.__name__ or "ViTS16" in app.__name__ or "ViTB16" in app.__name__ or "ViTL16" in app.__name__ or "ViTH16" in app.__name__ ): num_patches = 197 self.assertShapeEqual(output_shape, (None, num_patches, last_dim)) elif ( "ViTTiny32" in app.__name__ or "ViTS32" in app.__name__ or "ViTB32" in app.__name__ or "ViTL32" in app.__name__ or "ViTH32" in app.__name__ ): num_patches = 50 self.assertShapeEqual(output_shape, (None, num_patches, last_dim)) def _test_model_can_be_used_as_backbone(self, app, last_dim, args): inputs = keras.layers.Input(shape=(224, 224, 3)) backbone = app( include_rescaling=False, include_top=False, input_tensor=inputs, pooling="avg", **args ) x = inputs x = backbone(x) backbone_output = backbone.get_layer(index=-1).output model = keras.Model(inputs=inputs, outputs=[backbone_output]) model.compile() @pytest.mark.large # Saving is slow, so mark these large. def _test_model_serialization(self, app, _, args, save_format, filename): model = app(include_rescaling=True, include_top=False, **args) input_batch = tf.ones(shape=(16, 224, 224, 3)) model_output = model(input_batch) save_path = os.path.join(self.get_temp_dir(), filename) model.save(save_path, save_format=save_format) restored_model = keras.models.load_model(save_path) # Check that output matches. restored_output = restored_model(input_batch) self.assertAllClose(model_output, restored_output) if __name__ == "__main__": tf.test.main()
keras-cv/keras_cv/models/legacy/models_test.py/0
{ "file_path": "keras-cv/keras_cv/models/legacy/models_test.py", "repo_id": "keras-cv", "token_count": 2989 }
24
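A short sketch of where the hard-coded patch counts in the test above come from: the ViT and MLP-Mixer variants split a 224x224 image into non-overlapping patches of size 16 or 32, and ViT prepends one class token to the sequence.

```python
image_size = 224

for patch_size in (16, 32):
    num_patches = (image_size // patch_size) ** 2
    print(f"patch_size={patch_size}: Mixer tokens={num_patches}, "
          f"ViT tokens={num_patches + 1}")
# patch_size=16: Mixer tokens=196, ViT tokens=197
# patch_size=32: Mixer tokens=49, ViT tokens=50
```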
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import tensorflow as tf from tensorflow.keras import utils def parse_weights(weights, include_top, model_type): if not weights: return weights if weights.startswith("gs://"): weights = weights.replace("gs://", "https://storage.googleapis.com/") return utils.get_file( origin=weights, cache_subdir="models", ) if tf.io.gfile.exists(weights): return weights if weights in ALIASES[model_type]: weights = ALIASES[model_type][weights] if weights in WEIGHTS_CONFIG[model_type]: if not include_top: weights = weights + "-notop" return utils.get_file( origin=f"{BASE_PATH}/{model_type}/{weights}.h5", cache_subdir="models", file_hash=WEIGHTS_CONFIG[model_type][weights], ) raise ValueError( "The `weights` argument should be either `None`, a the path to the " "weights file to be loaded, or the name of pre-trained weights from " "https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/weights.py. " # noqa: E501 f"Invalid `weights` argument: {weights}" ) BASE_PATH = "https://storage.googleapis.com/keras-cv/models" ALIASES = { "convmixer_512_16": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "cspdarknetl": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "cspdarknettiny": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "darknet53": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "deeplabv3": { "voc": "voc/segmentation-v0", }, "densenet121": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "densenet169": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "densenet201": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "resnet50": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "resnet50v2": { "imagenet": "imagenet/classification-v2", "imagenet/classification": "imagenet/classification-v2", }, "vittiny16": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "vits16": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "vitb16": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "vitl16": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "vits32": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, "vitb32": { "imagenet": "imagenet/classification-v0", "imagenet/classification": "imagenet/classification-v0", }, } WEIGHTS_CONFIG = { "convmixer_512_16": { "imagenet/classification-v0": "861f3080dc383f7936d3df89691aadea05eee6acaa4a0b60aa70dd657df915ee", # noqa: E501 
"imagenet/classification-v0-notop": "aa08c7fa9ca6ec045c4783e1248198dbe1bc141e2ae788e712de471c0370822c", # noqa: E501 }, "cspdarknetl": { "imagenet/classification-v0": "8bdc3359222f0d26f77aa42c4e97d67a05a1431fe6c448ceeab9a9c5a34ff804", # noqa: E501 "imagenet/classification-v0-notop": "9303aabfadffbff8447171fce1e941f96d230d8f3cef30d3f05a9c85097f8f1e", # noqa: E501 }, "cspdarknettiny": { "imagenet/classification-v0": "c17fe6d7b597f2eb25e42fbd97ec58fb1dad753ba18920cc27820953b7947704", # noqa: E501 "imagenet/classification-v0-notop": "0007ae82c95be4d4aef06368a7c38e006381324d77e5df029b04890e18a8ad19", # noqa: E501 }, "darknet53": { "imagenet/classification-v0": "7bc5589f7f7f7ee3878e61ab9323a71682bfb617eb57f530ca8757c742f00c77", # noqa: E501 "imagenet/classification-v0-notop": "8dcce43163e4b4a63e74330ba1902e520211db72d895b0b090b6bfe103e7a8a5", # noqa: E501 }, "deeplabv3": { "voc/segmentation-v0": "732042e8b6c9ddba3d51c861f26dc41865187e9f85a0e5d43dfef75a405cca18", # noqa: E501 }, "densenet121": { "imagenet/classification-v0": "13de3d077ad9d9816b9a0acc78215201d9b6e216c7ed8e71d69cc914f8f0775b", # noqa: E501 "imagenet/classification-v0-notop": "709afe0321d9f2b2562e562ff9d0dc44cca10ed09e0e2cfba08d783ff4dab6bf", # noqa: E501 }, "densenet169": { "imagenet/classification-v0": "4cd2a661d0cb2378574073b23129ee4d06ea53c895c62a8863c44ee039e236a1", # noqa: E501 "imagenet/classification-v0-notop": "a99d1bb2cbe1a59a1cdd1f435fb265453a97c2a7b723d26f4ebee96e5fb49d62", # noqa: E501 }, "densenet201": { "imagenet/classification-v0": "3b6032e744e5e5babf7457abceaaba11fcd449fe2d07016ae5076ac3c3c6cf0c", # noqa: E501 "imagenet/classification-v0-notop": "c1189a934f12c1a676a9cf52238e5994401af925e2adfc0365bad8133c052060", # noqa: E501 }, "resnet50": { "imagenet/classification-v0": "1525dc1ce580239839ba6848c0f1b674dc89cb9ed73c4ed49eba355b35eac3ce", # noqa: E501 "imagenet/classification-v0-notop": "dc5f6d8f929c78d0fc192afecc67b11ac2166e9d8b9ef945742368ae254c07af", # noqa: E501 }, "resnet50v2": { "imagenet/classification-v0": "11bde945b54d1dca65101be2648048abca8a96a51a42820d87403486389790db", # noqa: E501 "imagenet/classification-v0-notop": "5b4aca4932c433d84f6aef58135472a4312ed2fa565d53fedcd6b0c24b54ab4a", # noqa: E501 "imagenet/classification-v1": "a32e5d9998e061527f6f947f36d8e794ad54dad71edcd8921cda7804912f3ee7", # noqa: E501 "imagenet/classification-v1-notop": "ac46b82c11070ab2f69673c41fbe5039c9eb686cca4f34cd1d79412fd136f1ae", # noqa: E501 "imagenet/classification-v2": "5ee5a8ac650aaa59342bc48ffe770e6797a5550bcc35961e1d06685292c15921", # noqa: E501 "imagenet/classification-v2-notop": "e711c83d6db7034871f6d345a476c8184eab99dbf3ffcec0c1d8445684890ad9", # noqa: E501 }, "vittiny16": { "imagenet/classification-v0": "c8227fde16ec8c2e7ab886169b11b4f0ca9af2696df6d16767db20acc9f6e0dd", # noqa: E501 "imagenet/classification-v0-notop": "aa4d727e3c6bd30b20f49d3fa294fb4bbef97365c7dcb5cee9c527e4e83c8f5b", # noqa: E501 }, "vits16": { "imagenet/classification-v0": "4a66a1a70a879ff33a3ca6ca30633b9eadafea84b421c92174557eee83e088b5", # noqa: E501 "imagenet/classification-v0-notop": "8d0111eda6692096676a5453abfec5d04c79e2de184b04627b295f10b1949745", # noqa: E501 }, "vitb16": { "imagenet/classification-v0": "6ab4e08c773e08de42023d963a97e905ccba710e2c05ef60c0971978d4a8c41b", # noqa: E501 "imagenet/classification-v0-notop": "4a1bdd32889298471cb4f30882632e5744fd519bf1a1525b1fa312fe4ea775ed", # noqa: E501 }, "vitl16": { "imagenet/classification-v0": "5a98000f848f2e813ea896b2528983d8d956f8c4b76ceed0b656219d5b34f7fb", # noqa: E501 
"imagenet/classification-v0-notop": "40d237c44f14d20337266fce6192c00c2f9b890a463fd7f4cb17e8e35b3f5448", # noqa: E501 }, "vits32": { "imagenet/classification-v0": "f5836e3aff2bab202eaee01d98337a08258159d3b718e0421834e98b3665e10a", # noqa: E501 "imagenet/classification-v0-notop": "f3907845eff780a4d29c1c56e0ae053411f02fff6fdce1147c4c3bb2124698cd", # noqa: E501 }, "vitb32": { "imagenet/classification-v0": "73025caa78459dc8f9b1de7b58f1d64e24a823f170d17e25fcc8eb6179bea179", # noqa: E501 "imagenet/classification-v0-notop": "f07b80c03336d731a2a3a02af5cac1e9fc9aa62659cd29e2e7e5c7474150cc71", # noqa: E501 }, }
keras-cv/keras_cv/models/legacy/weights.py/0
{ "file_path": "keras-cv/keras_cv/models/legacy/weights.py", "repo_id": "keras-cv", "token_count": 4397 }
25
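A dry-run sketch of how `parse_weights` above resolves a shorthand weights name: the alias table maps "imagenet" to a versioned key, `include_top=False` appends "-notop", and the final URL and file hash come from `WEIGHTS_CONFIG`. The `resolve` helper is mine and mirrors only this part of the logic; it assumes `keras_cv` is installed with this legacy module present, and it does not call `utils.get_file` or download anything.

```python
from keras_cv.models.legacy.weights import ALIASES, BASE_PATH, WEIGHTS_CONFIG


def resolve(weights, include_top, model_type):
    # Expand shorthand aliases like "imagenet" to their versioned keys.
    weights = ALIASES[model_type].get(weights, weights)
    # Headless variants use the "-notop" checkpoints.
    if not include_top:
        weights = weights + "-notop"
    return (
        f"{BASE_PATH}/{model_type}/{weights}.h5",
        WEIGHTS_CONFIG[model_type][weights],
    )


url, file_hash = resolve("imagenet", include_top=False, model_type="resnet50")
print(url)
# https://storage.googleapis.com/keras-cv/models/resnet50/imagenet/classification-v0-notop.h5
```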
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import warnings from keras_cv import bounding_box from keras_cv import layers from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.losses.ciou_loss import CIoULoss from keras_cv.models.backbones.backbone_presets import backbone_presets from keras_cv.models.backbones.backbone_presets import ( backbone_presets_with_weights, ) from keras_cv.models.object_detection.__internal__ import unpack_input from keras_cv.models.object_detection.yolo_v8.yolo_v8_detector_presets import ( yolo_v8_detector_presets, ) from keras_cv.models.object_detection.yolo_v8.yolo_v8_label_encoder import ( YOLOV8LabelEncoder, ) from keras_cv.models.object_detection.yolo_v8.yolo_v8_layers import ( apply_conv_bn, ) from keras_cv.models.object_detection.yolo_v8.yolo_v8_layers import ( apply_csp_block, ) from keras_cv.models.task import Task from keras_cv.utils.python_utils import classproperty from keras_cv.utils.train import get_feature_extractor BOX_REGRESSION_CHANNELS = 64 def get_anchors( image_shape, strides=[8, 16, 32], base_anchors=[0.5, 0.5], ): """Gets anchor points for YOLOV8. YOLOV8 uses anchor points representing the center of proposed boxes, and matches ground truth boxes to anchors based on center points. Args: image_shape: tuple or list of two integers representing the height and width of input images, respectively. strides: tuple of list of integers, the size of the strides across the image size that should be used to create anchors. base_anchors: tuple or list of two integers representing the offset from (0,0) to start creating the center of anchor boxes, relative to the stride. For example, using the default (0.5, 0.5) creates the first anchor box for each stride such that its center is half of a stride from the edge of the image. Returns: A tuple of anchor centerpoints and anchor strides. Multiplying the two together will yield the centerpoints in absolute x,y format. """ base_anchors = ops.array(base_anchors, dtype="float32") all_anchors = [] all_strides = [] for stride in strides: hh_centers = ops.arange(0, image_shape[0], stride) ww_centers = ops.arange(0, image_shape[1], stride) ww_grid, hh_grid = ops.meshgrid(ww_centers, hh_centers) grid = ops.cast( ops.reshape(ops.stack([hh_grid, ww_grid], 2), [-1, 1, 2]), "float32", ) anchors = ( ops.expand_dims( base_anchors * ops.array([stride, stride], "float32"), 0 ) + grid ) anchors = ops.reshape(anchors, [-1, 2]) all_anchors.append(anchors) all_strides.append(ops.repeat(stride, anchors.shape[0])) all_anchors = ops.cast(ops.concatenate(all_anchors, axis=0), "float32") all_strides = ops.cast(ops.concatenate(all_strides, axis=0), "float32") all_anchors = all_anchors / all_strides[:, None] # Swap the x and y coordinates of the anchors. 
all_anchors = ops.concatenate( [all_anchors[:, 1, None], all_anchors[:, 0, None]], axis=-1 ) return all_anchors, all_strides def apply_path_aggregation_fpn(features, depth=3, name="fpn"): """Applies the Feature Pyramid Network (FPN) to the outputs of a backbone. Args: features: list of tensors representing the P3, P4, and P5 outputs of the backbone. depth: integer, the depth of the CSP blocks used in the FPN. name: string, a prefix for names of layers used by the FPN. Returns: A list of three tensors whose shapes are the same as the three inputs, but which are dependent on each of the three inputs to combine the high resolution of the P3 inputs with the strong feature representations of the P5 inputs. """ p3, p4, p5 = features # Upsample P5 and concatenate with P4, then apply a CSPBlock. p5_upsampled = ops.repeat(ops.repeat(p5, 2, axis=1), 2, axis=2) p4p5 = ops.concatenate([p5_upsampled, p4], axis=-1) p4p5 = apply_csp_block( p4p5, channels=p4.shape[-1], depth=depth, shortcut=False, activation="swish", name=f"{name}_p4p5", ) # Upsample P4P5 and concatenate with P3, then apply a CSPBlock. p4p5_upsampled = ops.repeat(ops.repeat(p4p5, 2, axis=1), 2, axis=2) p3p4p5 = ops.concatenate([p4p5_upsampled, p3], axis=-1) p3p4p5 = apply_csp_block( p3p4p5, channels=p3.shape[-1], depth=depth, shortcut=False, activation="swish", name=f"{name}_p3p4p5", ) # Downsample P3P4P5, concatenate with P4P5, and apply a CSP Block. p3p4p5_d1 = apply_conv_bn( p3p4p5, p3p4p5.shape[-1], kernel_size=3, strides=2, activation="swish", name=f"{name}_p3p4p5_downsample1", ) p3p4p5_d1 = ops.concatenate([p3p4p5_d1, p4p5], axis=-1) p3p4p5_d1 = apply_csp_block( p3p4p5_d1, channels=p4p5.shape[-1], shortcut=False, activation="swish", name=f"{name}_p3p4p5_downsample1_block", ) # Downsample the resulting P3P4P5 again, concatenate with P5, and apply # another CSP Block. p3p4p5_d2 = apply_conv_bn( p3p4p5_d1, p3p4p5_d1.shape[-1], kernel_size=3, strides=2, activation="swish", name=f"{name}_p3p4p5_downsample2", ) p3p4p5_d2 = ops.concatenate([p3p4p5_d2, p5], axis=-1) p3p4p5_d2 = apply_csp_block( p3p4p5_d2, channels=p5.shape[-1], shortcut=False, activation="swish", name=f"{name}_p3p4p5_downsample2_block", ) return [p3p4p5, p3p4p5_d1, p3p4p5_d2] def apply_yolo_v8_head( inputs, num_classes, name="yolo_v8_head", ): """Applies a YOLOV8 head. Makes box and class predictions based on the output of a feature pyramid network. Args: inputs: list of tensors output by the Feature Pyramid Network, should have the same shape as the P3, P4, and P5 outputs of the backbone. num_classes: integer, the number of classes that a bounding box could possibly be assigned to. name: string, a prefix for names of layers used by the head. Returns: A dictionary with two entries. The "boxes" entry contains box regression predictions, while the "classes" entry contains class predictions. """ # 64 is the default number of channels, as 16 components are used to predict # each of the 4 offsets for corner points of a bounding box with respect # to the center point. In cases where the input has much higher resolution # (e.g. the P3 input has >256 channels), we use additional channels for # the intermediate conv layers. This is only true for very large backbones. box_channels = max(BOX_REGRESSION_CHANNELS, inputs[0].shape[-1] // 4) # We use at least num_classes channels for intermediate conv layer for class # predictions. In most cases, the P3 input has many more channels than the # number of classes, so we preserve those channels until the final layer. 
class_channels = max(num_classes, inputs[0].shape[-1]) # We compute box and class predictions for each of the feature maps from # the FPN and then combine them. outputs = [] for id, feature in enumerate(inputs): cur_name = f"{name}_{id+1}" box_predictions = apply_conv_bn( feature, box_channels, kernel_size=3, activation="swish", name=f"{cur_name}_box_1", ) box_predictions = apply_conv_bn( box_predictions, box_channels, kernel_size=3, activation="swish", name=f"{cur_name}_box_2", ) box_predictions = keras.layers.Conv2D( filters=BOX_REGRESSION_CHANNELS, kernel_size=1, name=f"{cur_name}_box_3_conv", )(box_predictions) class_predictions = apply_conv_bn( feature, class_channels, kernel_size=3, activation="swish", name=f"{cur_name}_class_1", ) class_predictions = apply_conv_bn( class_predictions, class_channels, kernel_size=3, activation="swish", name=f"{cur_name}_class_2", ) class_predictions = keras.layers.Conv2D( filters=num_classes, kernel_size=1, name=f"{cur_name}_class_3_conv", )(class_predictions) class_predictions = keras.layers.Activation( "sigmoid", name=f"{cur_name}_classifier" )(class_predictions) out = ops.concatenate([box_predictions, class_predictions], axis=-1) out = keras.layers.Reshape( [-1, out.shape[-1]], name=f"{cur_name}_output_reshape" )(out) outputs.append(out) outputs = ops.concatenate(outputs, axis=1) outputs = keras.layers.Activation( "linear", dtype="float32", name="box_outputs" )(outputs) return { "boxes": outputs[:, :, :BOX_REGRESSION_CHANNELS], "classes": outputs[:, :, BOX_REGRESSION_CHANNELS:], } def decode_regression_to_boxes(preds): """Decodes the results of the YOLOV8Detector forward-pass into boxes. Returns left / top / right / bottom predictions with respect to anchor points. Each coordinate is encoded with 16 predicted values. Those predictions are softmaxed and multiplied by [0..15] to make predictions. The resulting predictions are relative to the stride of an anchor box (and correspondingly relative to the scale of the feature map from which the predictions came). """ preds_bbox = keras.layers.Reshape((-1, 4, BOX_REGRESSION_CHANNELS // 4))( preds ) preds_bbox = ops.nn.softmax(preds_bbox, axis=-1) * ops.arange( BOX_REGRESSION_CHANNELS // 4, dtype="float32" ) return ops.sum(preds_bbox, axis=-1) def dist2bbox(distance, anchor_points): """Decodes distance predictions into xyxy boxes. Input left / top / right / bottom predictions are transformed into xyxy box predictions based on anchor points. The resulting xyxy predictions must be scaled by the stride of their corresponding anchor points to yield an absolute xyxy box. """ left_top, right_bottom = ops.split(distance, 2, axis=-1) x1y1 = anchor_points - left_top x2y2 = anchor_points + right_bottom return ops.concatenate((x1y1, x2y2), axis=-1) # xyxy bbox @keras_cv_export( [ "keras_cv.models.YOLOV8Detector", "keras_cv.models.object_detection.YOLOV8Detector", ] ) class YOLOV8Detector(Task): """Implements the YOLOV8 architecture for object detection. Args: backbone: `keras.Model`, must implement the `pyramid_level_inputs` property with keys "P2", "P3", and "P4" and layer names as values. A sensible backbone to use is the `keras_cv.models.YOLOV8Backbone`. num_classes: integer, the number of classes in your dataset excluding the background class. Classes should be represented by integers in the range [0, num_classes). bounding_box_format: string, the format of bounding boxes of input dataset. 
Refer [to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/) for more details on supported bounding box formats. fpn_depth: integer, a specification of the depth of the CSP blocks in the Feature Pyramid Network. This is usually 1, 2, or 3, depending on the size of your YOLOV8Detector model. We recommend using 3 for "yolo_v8_l_backbone" and "yolo_v8_xl_backbone". Defaults to 2. label_encoder: (Optional) A `YOLOV8LabelEncoder` that is responsible for transforming input boxes into trainable labels for YOLOV8Detector. If not provided, a default is provided. prediction_decoder: (Optional) A `keras.layers.Layer` that is responsible for transforming YOLOV8 predictions into usable bounding boxes. If not provided, a default is provided. The default `prediction_decoder` layer is a `keras_cv.layers.MultiClassNonMaxSuppression` layer, which uses a Non-Max Suppression for box pruning. Examples: ```python images = tf.ones(shape=(1, 512, 512, 3)) labels = { "boxes": tf.constant([ [ [0, 0, 100, 100], [100, 100, 200, 200], [300, 300, 100, 100], ] ], dtype=tf.float32), "classes": tf.constant([[1, 1, 1]], dtype=tf.int64), } model = keras_cv.models.YOLOV8Detector( num_classes=20, bounding_box_format="xywh", backbone=keras_cv.models.YOLOV8Backbone.from_preset( "yolo_v8_m_backbone_coco" ), fpn_depth=2 ) # Evaluate model without box decoding and NMS model(images) # Prediction with box decoding and NMS model.predict(images) # Train model model.compile( classification_loss='binary_crossentropy', box_loss='ciou', optimizer=tf.optimizers.SGD(global_clipnorm=10.0), jit_compile=False, ) model.fit(images, labels) ``` """ # noqa: E501 def __init__( self, backbone, num_classes, bounding_box_format, fpn_depth=2, label_encoder=None, prediction_decoder=None, **kwargs, ): extractor_levels = ["P3", "P4", "P5"] extractor_layer_names = [ backbone.pyramid_level_inputs[i] for i in extractor_levels ] feature_extractor = get_feature_extractor( backbone, extractor_layer_names, extractor_levels ) images = keras.layers.Input(feature_extractor.input_shape[1:]) features = list(feature_extractor(images).values()) fpn_features = apply_path_aggregation_fpn( features, depth=fpn_depth, name="pa_fpn" ) outputs = apply_yolo_v8_head( fpn_features, num_classes, ) # To make loss metrics pretty, we use a no-op layer with a good name. boxes = keras.layers.Concatenate(axis=1, name="box")([outputs["boxes"]]) scores = keras.layers.Concatenate(axis=1, name="class")( [outputs["classes"]] ) outputs = {"boxes": boxes, "classes": scores} super().__init__(inputs=images, outputs=outputs, **kwargs) self.bounding_box_format = bounding_box_format self._prediction_decoder = ( prediction_decoder or layers.NonMaxSuppression( bounding_box_format=bounding_box_format, from_logits=False, confidence_threshold=0.2, iou_threshold=0.7, ) ) self.backbone = backbone self.fpn_depth = fpn_depth self.num_classes = num_classes self.label_encoder = label_encoder or YOLOV8LabelEncoder( num_classes=num_classes ) def compile( self, box_loss, classification_loss, box_loss_weight=7.5, classification_loss_weight=0.5, metrics=None, **kwargs, ): """Compiles the YOLOV8Detector. `compile()` mirrors the standard Keras `compile()` method, but has one key distinction -- two losses must be provided: `box_loss` and `classification_loss`. Args: box_loss: a Keras loss to use for box offset regression. A preconfigured loss is provided when the string "ciou" is passed. classification_loss: a Keras loss to use for box classification. 
A preconfigured loss is provided when the string "binary_crossentropy" is passed. box_loss_weight: (optional) float, a scaling factor for the box loss. Defaults to 7.5. classification_loss_weight: (optional) float, a scaling factor for the classification loss. Defaults to 0.5. kwargs: most other `keras.Model.compile()` arguments are supported and propagated to the `keras.Model` class. """ if metrics is not None: raise ValueError("User metrics not yet supported for YOLOV8") if isinstance(box_loss, str): if box_loss == "ciou": box_loss = CIoULoss(bounding_box_format="xyxy", reduction="sum") elif box_loss == "iou": warnings.warn( "YOLOV8 recommends using CIoU loss, but was configured to " "use standard IoU. Consider using `box_loss='ciou'` " "instead." ) else: raise ValueError( f"Invalid box loss for YOLOV8Detector: {box_loss}. Box " "loss should be a keras.Loss or the string 'ciou'." ) if isinstance(classification_loss, str): if classification_loss == "binary_crossentropy": classification_loss = keras.losses.BinaryCrossentropy( reduction="sum" ) else: raise ValueError( "Invalid classification loss for YOLOV8Detector: " f"{classification_loss}. Classification loss should be a " "keras.Loss or the string 'binary_crossentropy'." ) self.box_loss = box_loss self.classification_loss = classification_loss self.box_loss_weight = box_loss_weight self.classification_loss_weight = classification_loss_weight losses = { "box": self.box_loss, "class": self.classification_loss, } super().compile(loss=losses, **kwargs) def train_step(self, *args): data = args[-1] args = args[:-1] x, y = unpack_input(data) return super().train_step(*args, (x, y)) def test_step(self, *args): data = args[-1] args = args[:-1] x, y = unpack_input(data) return super().test_step(*args, (x, y)) def compute_loss(self, x, y, y_pred, sample_weight=None, **kwargs): box_pred, cls_pred = y_pred["boxes"], y_pred["classes"] pred_boxes = decode_regression_to_boxes(box_pred) pred_scores = cls_pred anchor_points, stride_tensor = get_anchors(image_shape=x.shape[1:]) stride_tensor = ops.expand_dims(stride_tensor, axis=-1) gt_labels = y["classes"] mask_gt = ops.all(y["boxes"] > -1.0, axis=-1, keepdims=True) gt_bboxes = bounding_box.convert_format( y["boxes"], source=self.bounding_box_format, target="xyxy", images=x, ) pred_bboxes = dist2bbox(pred_boxes, anchor_points) target_bboxes, target_scores, fg_mask = self.label_encoder( pred_scores, ops.cast(pred_bboxes * stride_tensor, gt_bboxes.dtype), anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt, ) target_bboxes /= stride_tensor target_scores_sum = ops.maximum(ops.sum(target_scores), 1) box_weight = ops.expand_dims( ops.sum(target_scores, axis=-1) * fg_mask, axis=-1, ) y_true = { "box": target_bboxes * fg_mask[..., None], "class": target_scores, } y_pred = { "box": pred_bboxes * fg_mask[..., None], "class": pred_scores, } sample_weights = { "box": self.box_loss_weight * box_weight / target_scores_sum, "class": self.classification_loss_weight / target_scores_sum, } return super().compute_loss( x=x, y=y_true, y_pred=y_pred, sample_weight=sample_weights, **kwargs ) def decode_predictions( self, pred, images, ): boxes = pred["boxes"] scores = pred["classes"] boxes = decode_regression_to_boxes(boxes) anchor_points, stride_tensor = get_anchors(image_shape=images.shape[1:]) stride_tensor = ops.expand_dims(stride_tensor, axis=-1) box_preds = dist2bbox(boxes, anchor_points) * stride_tensor box_preds = bounding_box.convert_format( box_preds, source="xyxy", target=self.bounding_box_format, 
images=images, ) return self.prediction_decoder(box_preds, scores) def predict_step(self, *args): outputs = super().predict_step(*args) if isinstance(outputs, tuple): return self.decode_predictions(outputs[0], args[-1]), outputs[1] else: return self.decode_predictions(outputs, args[-1]) @property def prediction_decoder(self): return self._prediction_decoder @prediction_decoder.setter def prediction_decoder(self, prediction_decoder): if prediction_decoder.bounding_box_format != self.bounding_box_format: raise ValueError( "Expected `prediction_decoder` and YOLOV8Detector to " "use the same `bounding_box_format`, but got " "`prediction_decoder.bounding_box_format=" f"{prediction_decoder.bounding_box_format}`, and " "`self.bounding_box_format=" f"{self.bounding_box_format}`." ) self._prediction_decoder = prediction_decoder self.make_predict_function(force=True) self.make_train_function(force=True) self.make_test_function(force=True) def get_config(self): return { "num_classes": self.num_classes, "bounding_box_format": self.bounding_box_format, "fpn_depth": self.fpn_depth, "backbone": keras.saving.serialize_keras_object(self.backbone), "label_encoder": keras.saving.serialize_keras_object( self.label_encoder ), "prediction_decoder": keras.saving.serialize_keras_object( self._prediction_decoder ), } @classmethod def from_config(cls, config): config["backbone"] = keras.saving.deserialize_keras_object( config["backbone"] ) label_encoder = config.get("label_encoder") if label_encoder is not None and isinstance(label_encoder, dict): config["label_encoder"] = keras.saving.deserialize_keras_object( label_encoder ) prediction_decoder = config.get("prediction_decoder") if prediction_decoder is not None and isinstance( prediction_decoder, dict ): config["prediction_decoder"] = ( keras.saving.deserialize_keras_object(prediction_decoder) ) return cls(**config) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return copy.deepcopy({**backbone_presets, **yolo_v8_detector_presets}) @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return copy.deepcopy( {**backbone_presets_with_weights, **yolo_v8_detector_presets} ) @classproperty def backbone_presets(cls): """Dictionary of preset names and configurations of compatible backbones.""" return copy.deepcopy(backbone_presets)
keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector.py/0
{ "file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector.py", "repo_id": "keras-cv", "token_count": 11170 }
26
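A simplified NumPy sketch of the geometry behind `get_anchors` and `dist2bbox` above, for a single 64x64 image and a single stride of 8. Anchor centers sit at (0.5, 0.5) offsets within each stride cell, distances are (left, top, right, bottom) in stride units, and multiplying the decoded xyxy box by the stride recovers pixel coordinates. The real `get_anchors` additionally handles multiple strides per feature level and swaps the coordinate order.

```python
import numpy as np

stride = 8
centers = (np.arange(0, 64, stride) / stride) + 0.5        # 8 cells per axis
xx, yy = np.meshgrid(centers, centers)
anchor_points = np.stack([xx.ravel(), yy.ravel()], axis=-1)  # (64, 2)

# One predicted distance vector per anchor: 1.5 cells in every direction.
distance = np.full((anchor_points.shape[0], 4), 1.5, dtype="float32")
left_top, right_bottom = distance[:, :2], distance[:, 2:]

# dist2bbox: subtract left/top, add right/bottom to get xyxy in stride units.
boxes_xyxy = np.concatenate(
    [anchor_points - left_top, anchor_points + right_bottom], axis=-1
)
boxes_abs = boxes_xyxy * stride  # back to pixel coordinates

print(anchor_points.shape, boxes_abs[0])  # (64, 2) [-8. -8. 16. 16.]
```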
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.layers.object_detection_3d.heatmap_decoder import HeatmapDecoder from keras_cv.models.object_detection_3d.center_pillar_backbone_presets import ( backbone_presets, ) from keras_cv.models.task import Task from keras_cv.utils.python_utils import classproperty @keras_cv_export("keras_cv.models.MultiHeadCenterPillar") class MultiHeadCenterPillar(Task): """Multi headed model based on CenterNet heatmap and PointPillar. This model builds box classification and regression for each class separately. It voxelizes the point cloud feature, applies feature extraction on top of voxelized feature, and applies multi-class classification and regression heads on the feature map. Args: backbone: the backbone to apply to voxelized features. voxel_net: the voxel_net that takes point cloud feature and convert to voxelized features. KerasCV offers a `DynamicVoxelization` layer in `keras_cv.layers` which is a reasonable default for most detection use cases. multiclass_head: A keras.layers.Layer which takes the backbone output and returns a dict of heatmap prediction and regression prediction per class. prediction_decoder: a multi class heatmap prediction decoder that returns a dict of decoded boxes, box class, and box confidence score per class. """ def __init__( self, backbone, voxel_net, multiclass_head, prediction_decoder, **kwargs, ): point_xyz = keras.layers.Input((None, 3), name="point_xyz") point_feature = keras.layers.Input((None, 4), name="point_feature") point_mask = keras.layers.Input( (None, 1), name="point_mask", dtype="bool" ) inputs = { "point_xyz": point_xyz, "point_feature": point_feature, "point_mask": point_mask, } voxel_feature = voxel_net(point_xyz, point_feature, point_mask[..., 0]) voxel_feature = backbone(voxel_feature) predictions = multiclass_head(voxel_feature) # A slight hack to get the output names in the model outputs for a # functional model. for head_name in multiclass_head._head_names: predictions[f"box_{head_name}"] = keras.layers.Identity( name=f"box_{head_name}" )(predictions[head_name]) predictions[f"heatmap_{head_name}"] = keras.layers.Identity( name=f"heatmap_{head_name}" )(predictions[head_name]) super().__init__(inputs=inputs, outputs=predictions, **kwargs) self._backbone = backbone self._multiclass_head = multiclass_head self._prediction_decoder = prediction_decoder self._head_names = self._multiclass_head._head_names def compile(self, heatmap_loss=None, box_loss=None, **kwargs): """Compiles the MultiHeadCenterPillar. `compile()` mirrors the standard Keras `compile()` method, but allows for specification of heatmap and box-specific losses. Args: heatmap_loss: a Keras loss to use for heatmap regression. box_loss: a Keras loss to use for box regression, or a list of Keras losses for box regression, one for each class. 
If only one loss is specified, it will be used for all classes, otherwise exactly one loss should be specified per class. kwargs: other `keras.Model.compile()` arguments are supported and propagated to the `keras.Model` class. """ losses = {} if box_loss is not None and not isinstance(box_loss, list): box_loss = [ box_loss for _ in range(self._multiclass_head._num_classes) ] for i in range(self._multiclass_head._num_classes): losses[f"heatmap_class_{i+1}"] = heatmap_loss losses[f"box_class_{i+1}"] = box_loss[i] super().compile(loss=losses, **kwargs) def compute_loss(self, x, y, y_pred, sample_weight=None, **kwargs): predictions = y_pred targets = y y_pred = {} y_true = {} sample_weight = {} for head_name in self._head_names: prediction = predictions[head_name] heatmap_pred = ops.softmax(prediction[..., :2])[..., 1] box_pred = prediction[..., 2:] box = targets[head_name]["boxes"] heatmap = targets[head_name]["heatmap"] index = targets[head_name]["top_k_index"] # the prediction returns 2 outputs for background vs object y_pred["heatmap_" + head_name] = heatmap_pred y_true["heatmap_" + head_name] = heatmap # TODO(ianstenbit): loss heatmap threshold should be configurable. box_regression_mask = ( ops.take_along_axis( ops.reshape(heatmap, (heatmap.shape[0], -1)), index[..., 0] * heatmap.shape[1] + index[..., 1], axis=1, ) > 0.95 ) box = ops.take_along_axis( ops.reshape(box, (ops.shape(box)[0], -1, 7)), ops.expand_dims( index[..., 0] * ops.shape(box)[1] + index[..., 1], axis=-1 ), axis=1, ) box_pred = ops.take_along_axis( ops.reshape( box_pred, (ops.shape(box_pred)[0], -1, ops.shape(box_pred)[-1]), ), ops.expand_dims( index[..., 0] * ops.shape(box_pred)[1] + index[..., 1], axis=-1, ), axis=1, ) box_center_mask = heatmap > 0.99 num_boxes = ops.maximum( ops.sum(ops.cast(box_center_mask, "float32"), axis=[1, 2]), 1 ) sample_weight["box_" + head_name] = ops.cast( box_regression_mask, "float32" ) / ops.broadcast_to( ops.expand_dims(num_boxes, axis=-1), ops.shape(box_regression_mask), ) sample_weight["heatmap_" + head_name] = ops.ones_like( heatmap ) / ops.broadcast_to( ops.expand_dims(ops.expand_dims(num_boxes, axis=-1), axis=-1), heatmap.shape, ) y_pred["box_" + head_name] = box_pred y_true["box_" + head_name] = box return super().compute_loss( x={}, y=y_true, y_pred=y_pred, sample_weight=sample_weight ) def predict_step(self, *args): outputs = super().predict_step(*args) if isinstance(outputs, tuple): return self._prediction_decoder(outputs[0]), outputs[1] else: return self._prediction_decoder(outputs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return copy.deepcopy(backbone_presets) @classproperty def backbone_presets(cls): """Dictionary of preset names and configurations of compatible backbones.""" return copy.deepcopy(backbone_presets) class MultiClassDetectionHead(keras.layers.Layer): """Multi-class object detection head for CenterPillar. This head includes a 1x1 convolution layer for each class which is called on the output of the CenterPillar's backbone. The outputs are per-class prediction heatmaps which must be decoded into 3D boxes. Args: num_classes: int, the number of box classes to predict. num_head_bin: list of ints, the number of heading bins to use for each respective box class. 
""" def __init__( self, num_classes, num_head_bin, name="detection_head", ): super().__init__(name=name) self._heads = {} self._head_names = [] self._num_classes = num_classes self._num_head_bin = num_head_bin for i in range(num_classes): self._head_names.append(f"class_{i + 1}") # 1x1 conv for each voxel/pixel. self._heads[self._head_names[i]] = keras.layers.Conv2D( # 2 for class, 3 for location, 3 for size, 2N for heading filters=8 + 2 * num_head_bin[i], kernel_size=(1, 1), name=f"head_{i + 1}", ) def call(self, feature, training=True): del training outputs = {} for head_name in self._head_names: outputs[head_name] = self._heads[head_name](feature) return outputs class MultiClassHeatmapDecoder(keras.layers.Layer): """Heatmap decoder for CenterPillar models. The heatmap decoder converts a sparse heatmap of box predictions into a padded dense set of decoded predicted boxes. The input to the heatmap decoder is a spatial heatmap of encoded box predictions, and the output is decoded 3D boxes in CENTER_XYZ_DXDYDZ_PHI format. Args: num_classes: int, the number of box classes to predict. num_head_bin: list of ints, the number of heading bins for each respective class. anchor_size: list of length-3 lists of floats, the 3D anchor sizes for each respective class. max_pool_size: list of ints, the 2D pooling size for the heatmap, to be used before box decoding. max_num_box: list of ints, the maximum number of boxes to return for each class. The top K boxes will be returned, and if fewer than K boxes are predicted, the outputs will be padded to contain K boxes. heatmap_threshold: list of floats, the heatmap confidence threshold to be used for each respective class to determine whether or not a box prediction is strong enough to decode and return. voxel_size: list of floats, the size of the voxels that were used to voxelize inputs to the CenterPillar model for each respective class. spatial_size: list of floats, the global 3D size of the heatmap for each respective class. `spatial_size[i] / voxel_size[i]` equals the size of the `i`th rank of the input heatmap. """ def __init__( self, num_classes, num_head_bin, anchor_size, max_pool_size, max_num_box, heatmap_threshold, voxel_size, spatial_size, **kwargs, ): super().__init__(**kwargs) self.num_classes = num_classes self.class_ids = list(range(1, num_classes + 1)) self.num_head_bin = num_head_bin self.anchor_size = anchor_size self.max_pool_size = max_pool_size self.max_num_box = max_num_box self.heatmap_threshold = heatmap_threshold self.voxel_size = voxel_size self.spatial_size = spatial_size self.decoders = {} for i, class_id in enumerate(self.class_ids): self.decoders[f"class_{class_id}"] = HeatmapDecoder( class_id=class_id, num_head_bin=self.num_head_bin[i], anchor_size=self.anchor_size[i], max_pool_size=self.max_pool_size[i], max_num_box=self.max_num_box[i], heatmap_threshold=self.heatmap_threshold[i], voxel_size=self.voxel_size, spatial_size=self.spatial_size, ) def call(self, predictions): box_predictions = [] class_predictions = [] box_confidence = [] for class_id in self.class_ids: class_tag = f"class_{class_id}" boxes, classes, confidence = self.decoders[class_tag]( predictions[class_tag] ) box_predictions.append(boxes) class_predictions.append(classes) box_confidence.append(confidence) return { "3d_boxes": { "boxes": ops.concatenate(box_predictions, axis=1), "classes": ops.concatenate(class_predictions, axis=1), "confidence": ops.concatenate(box_confidence, axis=1), } }
keras-cv/keras_cv/models/object_detection_3d/center_pillar.py/0
{ "file_path": "keras-cv/keras_cv/models/object_detection_3d/center_pillar.py", "repo_id": "keras-cv", "token_count": 5943 }
27
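A short sketch of the per-class output channel count used by `MultiClassDetectionHead` above: each 1x1 convolution predicts 2 class logits (background vs. object), 3 location offsets, 3 sizes, and 2 values per heading bin, i.e. `8 + 2 * num_head_bin` channels. The `num_head_bin` values below are illustrative only.

```python
num_head_bin = [12, 4]  # example values; one entry per box class

for i, bins in enumerate(num_head_bin):
    filters = 8 + 2 * bins
    print(f"class_{i + 1}: {filters} output channels "
          f"(2 class + 3 location + 3 size + {2 * bins} heading)")
```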
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_cv.models.segmentation.segformer.segformer import SegFormer from keras_cv.models.segmentation.segformer.segformer_presets import presets from keras_cv.utils.python_utils import classproperty ALIAS_DOCSTRING = """SegFormer model. For transfer learning use cases, make sure to read the [guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/). Args: backbone: a KerasCV backbone for feature extraction. num_classes: the number of classes for segmentation, including the background class. Examples: ```python input_data = tf.ones(shape=(8, 224, 224, 3)) # Randomly initialized backbone backbone = keras_cv.models.MiTBackbone.from_preset("mit_b0_imagenet") segformer = keras_cv.models.SegFormer(backbone=backbone, num_classes=19) output = model(input_data) ``` """ # noqa: E501 class SegFormerB0(SegFormer): def __new__( cls, num_classes, **kwargs, ): # Pack args in kwargs kwargs.update( { "num_classes": num_classes, } ) return SegFormer.from_preset("segformer_b0", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "segformer_b0": copy.deepcopy(presets["segformer_b0"]), } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets class SegFormerB1(SegFormer): def __new__( cls, num_classes, **kwargs, ): # Pack args in kwargs kwargs.update( { "num_classes": num_classes, } ) return SegFormer.from_preset("segformer_b1", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "segformer_b1": copy.deepcopy(presets["segformer_b1"]), } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets class SegFormerB2(SegFormer): def __new__( cls, num_classes, **kwargs, ): # Pack args in kwargs kwargs.update( { "num_classes": num_classes, } ) return SegFormer.from_preset("segformer_b2", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "segformer_b2": copy.deepcopy(presets["segformer_b2"]), } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets class SegFormerB3(SegFormer): def __new__( cls, num_classes, **kwargs, ): # Pack args in kwargs kwargs.update( { "num_classes": num_classes, } ) return SegFormer.from_preset("segformer_b3", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "segformer_b3": copy.deepcopy(presets["segformer_b3"]), } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets class SegFormerB4(SegFormer): def __new__( cls, num_classes, **kwargs, ): # Pack args in kwargs kwargs.update( { "num_classes": num_classes, } ) return 
SegFormer.from_preset("segformer_b4", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "segformer_b4": copy.deepcopy(presets["segformer_b4"]), } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets class SegFormerB5(SegFormer): def __new__( cls, num_classes, **kwargs, ): # Pack args in kwargs kwargs.update( { "num_classes": num_classes, } ) return SegFormer.from_preset("segformer_b5", **kwargs) @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return { "segformer_b5": copy.deepcopy(presets["segformer_b5"]), } @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return cls.presets setattr( SegFormerB0, "__doc__", ALIAS_DOCSTRING.format(name="SegFormerB0"), ) setattr( SegFormerB1, "__doc__", ALIAS_DOCSTRING.format(name="SegFormerB1"), ) setattr( SegFormerB2, "__doc__", ALIAS_DOCSTRING.format(name="SegFormerB2"), ) setattr( SegFormerB3, "__doc__", ALIAS_DOCSTRING.format(name="SegFormerB3"), ) setattr( SegFormerB4, "__doc__", ALIAS_DOCSTRING.format(name="SegFormerB4"), ) setattr( SegFormerB5, "__doc__", ALIAS_DOCSTRING.format(name="SegFormerB5"), )
keras-cv/keras_cv/models/segmentation/segformer/segformer_aliases.py/0
{ "file_path": "keras-cv/keras_cv/models/segmentation/segformer/segformer_aliases.py", "repo_id": "keras-cv", "token_count": 2719 }
28
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code is taken nearly verbatim from
https://github.com/divamgupta/stable-diffusion-tensorflow."""

import gzip
import html
from functools import lru_cache

import regex as re

from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras


@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to unicode strings.

    The reversible bpe codes work on unicode strings. This means you need a
    large number of unicode characters in your vocab if you want to avoid
    UNKs. When you're at something like a 10B token dataset you end up needing
    around 5K for decent coverage. This is a significant percentage of your
    normal, say, 32K bpe vocab. To avoid that, we want lookup tables between
    utf-8 bytes and unicode strings. The mapping also avoids the
    whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of symbol pairs in a word.

    A word is represented as a tuple of symbols (symbols being variable-length
    strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def basic_clean(text):
    text = html.unescape(html.unescape(text))
    return text.strip()


def whitespace_clean(text):
    text = re.sub(r"\s+", " ", text)
    text = text.strip()
    return text


@keras_cv_export("keras_cv.models.stable_diffusion.SimpleTokenizer")
class SimpleTokenizer:
    def __init__(self, bpe_path=None):
        bpe_path = bpe_path or keras.utils.get_file(
            "bpe_simple_vocab_16e6.txt.gz",
            "https://github.com/openai/CLIP/blob/main/clip/bpe_simple_vocab_16e6.txt.gz?raw=true",  # noqa: E501
            file_hash="924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a",  # noqa: E501
        )
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
        merges = merges[1 : 49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + "</w>" for v in vocab]
        for merge in merges:
            vocab.append("".join(merge))
        vocab.extend(["<|startoftext|>", "<|endoftext|>"])
        self.vocab = vocab
        self.encoder = self._create_encoder(self.vocab)
        self.decoder = self._create_decoder(self.encoder)
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.special_tokens = {
            "<|startoftext|>": "<|startoftext|>",
            "<|endoftext|>": "<|endoftext|>",
        }
        self.cache = {
            "<|startoftext|>": "<|startoftext|>",
            "<|endoftext|>": "<|endoftext|>",
        }
        self.pat = self._create_pat()

    def _create_encoder(self, vocab):
        return dict(zip(vocab, range(len(vocab))))

    def _create_decoder(self, encoder):
        return {v: k for k, v in encoder.items()}

    def _create_pat(self):
        return re.compile(
            "|".join([re.escape(key) for key in self.special_tokens.keys()])
            + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE,
        )

    @property
    def end_of_text(self):
        return self.encoder["<|endoftext|>"]

    @property
    def start_of_text(self):
        return self.encoder["<|startoftext|>"]

    def add_tokens(self, tokens):
        if isinstance(tokens, str):
            tokens = [tokens]
        tokens_added = 0
        for token in tokens:
            if token in self.vocab:
                continue
            tokens_added += 1
            self.vocab.append(token)
            self.special_tokens[token] = token
            self.cache[token] = token
        self.encoder = self._create_encoder(self.vocab)
        self.decoder = self._create_decoder(self.encoder)
        self.pat = self._create_pat()
        return tokens_added

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            bigram = min(
                pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))
            )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # `first` does not occur in the rest of the word; copy the
                    # remaining symbols and stop.
                    new_word.extend(word[i:])
                    break

                if (
                    word[i] == first
                    and i < len(word) - 1
                    and word[i + 1] == second
                ):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )
            bpe_tokens.extend(
                self.encoder[bpe_token]
                for bpe_token in self.bpe(token).split(" ")
            )
        return [self.start_of_text] + bpe_tokens + [self.end_of_text]

    def decode(self, tokens):
        text = "".join([self.decoder[token] for token in tokens])
        text = (
            bytearray([self.byte_decoder[c] for c in text])
            .decode("utf-8", errors="replace")
            .replace("</w>", " ")
        )
        return text
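A minimal sketch of round-tripping a prompt through the tokenizer defined above. The first call downloads the BPE vocabulary file, so network access is assumed, and the example prompt is arbitrary:

```python
from keras_cv.models.stable_diffusion.clip_tokenizer import SimpleTokenizer

tokenizer = SimpleTokenizer()

# encode() lowercases and cleans the prompt, BPE-encodes it, then wraps the
# result in the start/end-of-text ids.
tokens = tokenizer.encode("a photograph of an astronaut riding a horse")
assert tokens[0] == tokenizer.start_of_text
assert tokens[-1] == tokenizer.end_of_text

# decode() maps ids back to text; word boundaries come from the "</w>" marker,
# so the recovered string carries a trailing space.
print(tokenizer.decode(tokens[1:-1]))
```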
keras-cv/keras_cv/models/stable_diffusion/clip_tokenizer.py/0
{ "file_path": "keras-cv/keras_cv/models/stable_diffusion/clip_tokenizer.py", "repo_id": "keras-cv", "token_count": 3452 }
29
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for IoU3D using custom op."""

import math
import os

import pytest

from keras_cv.ops import iou_3d
from keras_cv.tests.test_case import TestCase


class IoU3DTest(TestCase):
    @pytest.mark.skipif(
        "TEST_CUSTOM_OPS" not in os.environ
        or os.environ["TEST_CUSTOM_OPS"] != "true",
        reason="Requires binaries compiled from source",
    )
    def testOpCall(self):
        # Predicted boxes:
        # 0: a 2x2x2 box centered at 0,0,0, rotated 0 degrees
        # 1: a 2x2x2 box centered at 1,1,1, rotated 135 degrees
        # Ground Truth boxes:
        # 0: a 2x2x2 box centered at 1,1,1, rotated 45 degrees
        #    (covers the same volume as predicted box 1, since a square is
        #    invariant under 90-degree rotations)
        # 1: a 2x2x2 box centered at 1,1,1, rotated 0 degrees
        box_preds = [[0, 0, 0, 2, 2, 2, 0], [1, 1, 1, 2, 2, 2, 3 * math.pi / 4]]
        box_gt = [[1, 1, 1, 2, 2, 2, math.pi / 4], [1, 1, 1, 2, 2, 2, 0]]

        # Predicted box 0 and both ground truth boxes overlap by 1/8th of the
        # box volume, so IoU is 1/15.
        # Predicted box 1 covers the same volume as ground truth box 0, so
        # IoU is 1.
        # Predicted box 1 shares a center with ground truth box 1 but is
        # rotated by 135 degrees (equivalent to 45 degrees for a square
        # cross-section). Their IoU reduces to that of two concentric squares
        # rotated 45 degrees apart, which works out to 1/sqrt(2).
        expected_ious = [[1 / 15, 1 / 15], [1, 0.5**0.5]]

        self.assertAllClose(iou_3d(box_preds, box_gt), expected_ious)
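The expected values in the test above can be checked by hand. A small sketch of that arithmetic (pure Python, no compiled custom ops needed), under the assumption that the last box coordinate is a yaw angle about the z axis:

```python
import math

# Case 1: two axis-aligned 2x2x2 boxes whose centers differ by (1, 1, 1).
# They overlap in a 1x1x1 cube, so IoU = 1 / (8 + 8 - 1) = 1/15.
intersection = 1.0
union = 8.0 + 8.0 - intersection
assert math.isclose(intersection / union, 1 / 15)

# Case 2: two concentric 2x2 squares rotated 45 degrees apart intersect in a
# regular octagon of area 8 * (sqrt(2) - 1). With identical z extents the 3D
# IoU equals this 2D IoU, which simplifies to 1 / sqrt(2).
octagon = 8 * (math.sqrt(2) - 1)
iou = octagon / (4 + 4 - octagon)
assert math.isclose(iou, 0.5**0.5)
```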
keras-cv/keras_cv/ops/iou_3d_test.py/0
{ "file_path": "keras-cv/keras_cv/ops/iou_3d_test.py", "repo_id": "keras-cv", "token_count": 820 }
30
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from keras_cv.utils.conditional_imports import assert_cv2_installed
from keras_cv.utils.conditional_imports import assert_matplotlib_installed
from keras_cv.utils.conditional_imports import (
    assert_waymo_open_dataset_installed,
)
from keras_cv.utils.fill_utils import fill_rectangle
from keras_cv.utils.preprocessing import blend
from keras_cv.utils.preprocessing import ensure_tensor
from keras_cv.utils.preprocessing import get_interpolation
from keras_cv.utils.preprocessing import parse_factor
from keras_cv.utils.preprocessing import transform
from keras_cv.utils.preprocessing import transform_value_range
from keras_cv.utils.to_numpy import to_numpy
from keras_cv.utils.train import convert_inputs_to_tf_dataset
from keras_cv.utils.train import scale_loss_for_distribution
keras-cv/keras_cv/utils/__init__.py/0
{ "file_path": "keras-cv/keras_cv/utils/__init__.py", "repo_id": "keras-cv", "token_count": 398 }
31
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KerasCV Version check."""

try:
    import tensorflow as tf
except ModuleNotFoundError:
    raise ModuleNotFoundError(
        "KerasCV uses TensorFlow for its "
        "preprocessing layers. While this dependency "
        "will be dropped in the future, please install "
        "TensorFlow with `pip install tensorflow` to "
        "use KerasCV"
    )

from packaging.version import parse

MIN_VERSION = "2.11.0"


def check_tf_version():
    if parse(tf.__version__) < parse(MIN_VERSION):
        raise RuntimeError(
            "The TensorFlow package version needs to be at least "
            f"{MIN_VERSION} for KerasCV to run. Currently, your TensorFlow "
            f"version is {tf.__version__}. Please upgrade with `$ pip install "
            "--upgrade tensorflow`. You can use `pip freeze` to check "
            "afterwards that everything is ok."
        )
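A short sketch of how the check above is typically used: `check_tf_version()` is a no-op when the installed TensorFlow is new enough and raises otherwise.

```python
from keras_cv.version_check import check_tf_version

# Raises RuntimeError if the installed TensorFlow is older than 2.11.0;
# otherwise returns None and the import can proceed.
check_tf_version()
```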
keras-cv/keras_cv/version_check.py/0
{ "file_path": "keras-cv/keras_cv/version_check.py", "repo_id": "keras-cv", "token_count": 501 }
32
# Copyright 2019 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script."""

import os
import pathlib

from setuptools import find_packages
from setuptools import setup
from setuptools.dist import Distribution


def read(rel_path):
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, rel_path)) as fp:
        return fp.read()


def get_version(rel_path):
    for line in read(rel_path).splitlines():
        if line.startswith("__version__"):
            delim = '"' if '"' in line else "'"
            return line.split(delim)[1]
    raise RuntimeError("Unable to find version string.")


BUILD_WITH_CUSTOM_OPS = (
    "BUILD_WITH_CUSTOM_OPS" in os.environ
    and os.environ["BUILD_WITH_CUSTOM_OPS"] == "true"
)

HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()

if os.path.exists("keras_cv/version_utils.py"):
    VERSION = get_version("keras_cv/version_utils.py")
else:
    VERSION = get_version("keras_cv/src/version_utils.py")


class BinaryDistribution(Distribution):
    """This class is needed in order to create OS specific wheels."""

    def has_ext_modules(self):
        return BUILD_WITH_CUSTOM_OPS

    def is_pure(self):
        return not BUILD_WITH_CUSTOM_OPS


setup(
    name="keras-cv",
    description="Industry-strength computer Vision extensions for Keras.",
    long_description=README,
    long_description_content_type="text/markdown",
    version=VERSION,
    url="https://github.com/keras-team/keras-cv",
    author="Keras team",
    author_email="keras-cv@google.com",
    license="Apache License 2.0",
    install_requires=[
        "packaging",
        "absl-py",
        "regex",
        "tensorflow-datasets",
        "keras-core",
        "kagglehub",
    ],
    extras_require={
        "tests": [
            "flake8",
            "isort",
            "black[jupyter]",
            "pytest",
            "pycocotools",
        ],
        "examples": ["tensorflow_datasets", "matplotlib"],
    },
    distclass=BinaryDistribution,
    # Supported Python versions
    python_requires=">=3.9",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3 :: Only",
        "Operating System :: Unix",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: MacOS",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "Topic :: Software Development",
    ],
    packages=find_packages(exclude=("*_test.py",)),
    include_package_data=True,
)
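The version string is scraped from `version_utils.py` rather than imported. A minimal sketch of that parsing logic applied to an in-memory string (the "0.9.0" value is illustrative only, not an actual release number):

```python
sample = '__version__ = "0.9.0"\n'

for line in sample.splitlines():
    if line.startswith("__version__"):
        # Support both single- and double-quoted assignments.
        delim = '"' if '"' in line else "'"
        print(line.split(delim)[1])  # -> 0.9.0
```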
keras-cv/setup.py/0
{ "file_path": "keras-cv/setup.py", "repo_id": "keras-cv", "token_count": 1293 }
33