from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.merging.base_merge import Merge
@keras_core_export("keras_core.layers.Concatenate")
class Concatenate(Merge):
"""Concatenates a list of inputs.
It takes as input a list of tensors, all of the same shape except
for the concatenation axis, and returns a single tensor that is the
concatenation of all inputs.
Examples:
>>> x = np.arange(20).reshape(2, 2, 5)
>>> y = np.arange(20, 30).reshape(2, 1, 5)
>>> keras_core.layers.Concatenate(axis=1)([x, y])
Usage in a Keras model:
>>> x1 = keras_core.layers.Dense(8)(np.arange(10).reshape(5, 2))
>>> x2 = keras_core.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
>>> y = keras_core.layers.Concatenate()([x1, x2])
Args:
axis: Axis along which to concatenate.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the concatenation of the inputs alongside axis `axis`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
self.supports_masking = True
self._reshape_required = False
def build(self, input_shape):
# Used purely for shape validation.
if len(input_shape) < 1 or not isinstance(
input_shape[0], (tuple, list)
):
raise ValueError(
"A `Concatenate` layer should be called on a list of "
f"at least 1 input. Received: input_shape={input_shape}"
)
if all(shape is None for shape in input_shape):
return
reduced_inputs_shapes = [list(shape) for shape in input_shape]
shape_set = set()
for i in range(len(reduced_inputs_shapes)):
# Convert self.axis to positive axis for each input
# in case self.axis is a negative number
concat_axis = self.axis % len(reduced_inputs_shapes[i])
# Skip batch axis.
for axis, axis_value in enumerate(
reduced_inputs_shapes[i][1:], start=1
):
# Remove squeezable axes (axes with value of 1)
# if not in the axis that will be used for concatenation
# otherwise leave it.
# This approach allows building the layer,
# but if tensor shapes are not the same when
# calling, an exception will be raised.
if axis != concat_axis and axis_value == 1:
del reduced_inputs_shapes[i][axis]
if len(reduced_inputs_shapes[i]) > self.axis:
del reduced_inputs_shapes[i][self.axis]
shape_set.add(tuple(reduced_inputs_shapes[i]))
if len(shape_set) != 1:
err_msg = (
"A `Concatenate` layer requires inputs with matching shapes "
"except for the concatenation axis. "
f"Received: input_shape={input_shape}"
)
# Make sure all the shapes have same ranks.
ranks = set(len(shape) for shape in shape_set)
if len(ranks) != 1:
raise ValueError(err_msg)
# Get the only rank for the set.
(rank,) = ranks
for axis in range(rank):
# Skip the Nones in the shape since they are dynamic, also the
# axis for concat has been removed above.
unique_dims = set(
shape[axis]
for shape in shape_set
if shape[axis] is not None
)
if len(unique_dims) > 1:
raise ValueError(err_msg)
self.built = True
def _merge_function(self, inputs):
return ops.concatenate(inputs, axis=self.axis)
def compute_output_shape(self, input_shape):
if (not isinstance(input_shape, (tuple, list))) or (
not isinstance(input_shape[0], (tuple, list))
):
raise ValueError(
"A `Concatenate` layer should be called on a list of inputs. "
f"Received: input_shape={input_shape}"
)
input_shapes = input_shape
output_shape = list(input_shapes[0])
for shape in input_shapes[1:]:
if output_shape[self.axis] is None or shape[self.axis] is None:
output_shape[self.axis] = None
break
output_shape[self.axis] += shape[self.axis]
return tuple(output_shape)
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, (tuple, list)):
raise ValueError(f"`mask` should be a list. Received mask={mask}")
if not isinstance(inputs, (tuple, list)):
raise ValueError(
f"`inputs` should be a list. Received: inputs={inputs}"
)
if len(mask) != len(inputs):
raise ValueError(
"The lists `inputs` and `mask` should have the same length. "
f"Received: inputs={inputs} of length {len(inputs)}, and "
f"mask={mask} of length {len(mask)}"
)
if all(m is None for m in mask):
return None
# Make a list of masks while making sure
# the dimensionality of each mask
# is the same as the corresponding input.
masks = []
for input_i, mask_i in zip(inputs, mask):
if mask_i is None:
# Input is unmasked. Append all 1s to masks,
masks.append(ops.ones_like(input_i, dtype="bool"))
elif mask_i.ndim < input_i.ndim:
# Mask is smaller than the input, expand it
masks.append(ops.expand_dims(mask_i, axis=-1))
else:
masks.append(mask_i)
concatenated = ops.concatenate(masks, axis=self.axis)
return ops.all(concatenated, axis=-1, keepdims=False)
def get_config(self):
config = {"axis": self.axis}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_core_export("keras_core.layers.concatenate")
def concatenate(inputs, axis=-1, **kwargs):
"""Functional interface to the `Concatenate` layer.
Args:
inputs: A list of input tensors.
axis: Concatenation axis.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the concatenation of the inputs alongside axis `axis`.
"""
return Concatenate(axis=axis, **kwargs)(inputs)
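# --- Usage sketch (not part of the module above; assumes `keras_core` and
# `numpy` are installed). It illustrates the shape rule documented in
# `Concatenate`: all input shapes must match except along the concatenation
# axis, whose sizes are summed.
import numpy as np
import keras_core

x = np.arange(20).reshape(2, 2, 5).astype("float32")
y = np.arange(20, 30).reshape(2, 1, 5).astype("float32")
z = keras_core.layers.Concatenate(axis=1)([x, y])
assert tuple(z.shape) == (2, 3, 5)  # 2 + 1 along axis 1
# The functional interface is equivalent:
z_fn = keras_core.layers.concatenate([x, y], axis=1)
assert tuple(z_fn.shape) == (2, 3, 5)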
# End of file: keras-core/keras_core/layers/merging/concatenate.py
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has a L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras_core.layers.UnitNormalization()(data)
    >>> print(np.sum(normalized_data[0, :] ** 2))
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
def build(self, input_shape):
self.built = True
def call(self, inputs):
x = ops.cast(inputs, self.compute_dtype)
square_sum = ops.sum(ops.square(x), axis=self.axis, keepdims=True)
x_inv_norm = ops.rsqrt(ops.maximum(square_sum, 1e-12))
return ops.multiply(x, x_inv_norm)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
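# --- Usage sketch (not part of the module above; assumes `keras_core` and
# `numpy`). It checks that the layer matches a manual L2 normalization,
# i.e. x * rsqrt(max(sum(x**2, axis), 1e-12)), which is what `call()` computes.
import numpy as np
import keras_core
from keras_core import backend

data = np.arange(6, dtype="float32").reshape(2, 3)
out = keras_core.layers.UnitNormalization(axis=-1)(data)
manual = data / np.sqrt(
    np.maximum(np.sum(data**2, axis=-1, keepdims=True), 1e-12)
)
np.testing.assert_allclose(backend.convert_to_numpy(out), manual, rtol=1e-5)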
# End of file: keras-core/keras_core/layers/normalization/unit_normalization.py
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import layers
from keras_core import testing
@pytest.mark.requires_trainable_backend
class GlobalMaxPoolingBasicTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
("channels_last", False, (3, 5, 4), (3, 4)),
("channels_last", True, (3, 5, 4), (3, 1, 4)),
("channels_first", False, (3, 5, 4), (3, 5)),
)
def test_global_max_pooling1d(
self,
data_format,
keepdims,
input_shape,
output_shape,
):
self.run_layer_test(
layers.GlobalMaxPooling1D,
init_kwargs={
"data_format": data_format,
"keepdims": keepdims,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
("channels_last", False, (3, 5, 6, 4), (3, 4)),
("channels_last", True, (3, 5, 6, 4), (3, 1, 1, 4)),
("channels_first", False, (3, 5, 6, 4), (3, 5)),
)
def test_global_max_pooling2d(
self,
data_format,
keepdims,
input_shape,
output_shape,
):
self.run_layer_test(
layers.GlobalMaxPooling2D,
init_kwargs={
"data_format": data_format,
"keepdims": keepdims,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
("channels_last", False, (3, 5, 6, 5, 4), (3, 4)),
("channels_last", True, (3, 5, 6, 5, 4), (3, 1, 1, 1, 4)),
("channels_first", False, (3, 5, 6, 5, 4), (3, 5)),
)
def test_global_max_pooling3d(
self,
data_format,
keepdims,
input_shape,
output_shape,
):
self.run_layer_test(
layers.GlobalMaxPooling3D,
init_kwargs={
"data_format": data_format,
"keepdims": keepdims,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
class GlobalMaxPoolingCorrectnessTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
("channels_last", False),
("channels_last", True),
("channels_first", False),
("channels_first", True),
)
def test_global_max_pooling1d(self, data_format, keepdims):
def np_global_max_pool1d(x, data_format, keepdims):
steps_axis = [1] if data_format == "channels_last" else [2]
res = np.apply_over_axes(np.max, x, steps_axis)
if not keepdims:
res = res.squeeze()
return res
inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
layer = layers.GlobalMaxPooling1D(
data_format=data_format,
keepdims=keepdims,
)
outputs = layer(inputs)
expected = np_global_max_pool1d(inputs, data_format, keepdims)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
("channels_last", False),
("channels_last", True),
("channels_first", False),
("channels_first", True),
)
def test_global_max_pooling2d(self, data_format, keepdims):
def np_global_max_pool2d(x, data_format, keepdims):
steps_axis = [1, 2] if data_format == "channels_last" else [2, 3]
res = np.apply_over_axes(np.max, x, steps_axis)
if not keepdims:
res = res.squeeze()
return res
inputs = np.arange(96, dtype="float32").reshape((2, 3, 4, 4))
layer = layers.GlobalMaxPooling2D(
data_format=data_format,
keepdims=keepdims,
)
outputs = layer(inputs)
expected = np_global_max_pool2d(inputs, data_format, keepdims)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
("channels_last", False),
("channels_last", True),
("channels_first", False),
("channels_first", True),
)
def test_global_max_pooling3d(self, data_format, keepdims):
def np_global_max_pool3d(x, data_format, keepdims):
steps_axis = (
[1, 2, 3] if data_format == "channels_last" else [2, 3, 4]
)
res = np.apply_over_axes(np.max, x, steps_axis)
if not keepdims:
res = res.squeeze()
return res
inputs = np.arange(360, dtype="float32").reshape((2, 3, 3, 5, 4))
layer = layers.GlobalMaxPooling3D(
data_format=data_format,
keepdims=keepdims,
)
outputs = layer(inputs)
expected = np_global_max_pool3d(inputs, data_format, keepdims)
self.assertAllClose(outputs, expected)
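# --- Shape sketch (not part of the test module above; assumes the default
# "channels_last" data format). GlobalMaxPooling1D reduces the steps axis of a
# (batch, steps, features) input; `keepdims=True` keeps that axis as size 1.
import numpy as np
import keras_core

x = np.arange(24, dtype="float32").reshape(2, 3, 4)
pooled = keras_core.layers.GlobalMaxPooling1D()(x)
assert tuple(pooled.shape) == (2, 4)
kept = keras_core.layers.GlobalMaxPooling1D(keepdims=True)(x)
assert tuple(kept.shape) == (2, 1, 4)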
# End of file: keras-core/keras_core/layers/pooling/global_max_pooling_test.py
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
from keras_core.utils import backend_utils
from keras_core.utils import tf_utils
from keras_core.utils.module_utils import tensorflow as tf
@keras_core_export("keras_core.layers.Hashing")
class Hashing(Layer):
"""A preprocessing layer which hashes and bins categorical features.
This layer transforms categorical inputs to hashed output. It element-wise
    converts ints or strings to ints in a fixed range. The stable hash
function uses `tensorflow::ops::Fingerprint` to produce the same output
consistently across all platforms.
This layer uses [FarmHash64](https://github.com/google/farmhash) by default,
which provides a consistent hashed output across different platforms and is
stable across invocations, regardless of device and context, by mixing the
input bits thoroughly.
If you want to obfuscate the hashed output, you can also pass a random
`salt` argument in the constructor. In that case, the layer will use the
[SipHash64](https://github.com/google/highwayhash) hash function, with
the `salt` value serving as additional input to the hash function.
**Note:** This layer internally uses TensorFlow. It cannot
be used as part of the compiled computation graph of a model with
any backend other than TensorFlow.
It can however be used with any backend when running eagerly.
It can also always be used as part of an input preprocessing pipeline
with any backend (outside the model itself), which is how we recommend
to use this layer.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
**Example (FarmHash64)**
>>> layer = keras_core.layers.Hashing(num_bins=3)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
array([[1],
[0],
[1],
[1],
           [2]])
**Example (FarmHash64) with a mask value**
>>> layer = keras_core.layers.Hashing(num_bins=3, mask_value='')
>>> inp = [['A'], ['B'], [''], ['C'], ['D']]
>>> layer(inp)
array([[1],
[1],
[0],
[2],
[2]])
**Example (SipHash64)**
>>> layer = keras_core.layers.Hashing(num_bins=3, salt=[133, 137])
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
array([[1],
[2],
[1],
[0],
[2]])
**Example (Siphash64 with a single integer, same as `salt=[133, 133]`)**
>>> layer = keras_core.layers.Hashing(num_bins=3, salt=133)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
array([[0],
[0],
[2],
[1],
[0]])
Args:
num_bins: Number of hash bins. Note that this includes the `mask_value`
bin, so the effective number of bins is `(num_bins - 1)`
if `mask_value` is set.
mask_value: A value that represents masked inputs, which are mapped to
index 0. `None` means no mask term will be added and the
hashing will start at index 0. Defaults to `None`.
salt: A single unsigned integer or None.
If passed, the hash function used will be SipHash64,
with these values used as an additional input
(known as a "salt" in cryptography).
These should be non-zero. If `None`, uses the FarmHash64 hash
function. It also supports tuple/list of 2 unsigned
integer numbers, see reference paper for details.
Defaults to `None`.
output_mode: Specification for the output of the layer. Values can be
`"int"`, `"one_hot"`, `"multi_hot"`, or
`"count"` configuring the layer as follows:
- `"int"`: Return the integer bin indices directly.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as `num_bins`, containing a 1
at the input's bin index. If the last dimension is size 1,
will encode on that dimension.
If the last dimension is not size 1, will append a new
dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a
single array the same size as `num_bins`,
containing a 1 for each bin index
            present in the sample. Treats the last dimension
as the sample dimension, if input shape is
`(..., sample_length)`, output shape will be
`(..., num_tokens)`.
- `"count"`: As `"multi_hot"`, but the int array contains a count of
the number of times the bin index appeared in the sample.
Defaults to `"int"`.
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
and `"count"` output modes. Only supported with TensorFlow
backend. If `True`, returns a `SparseTensor` instead of
a dense `Tensor`. Defaults to `False`.
**kwargs: Keyword arguments to construct a layer.
Input shape:
A single string, a list of strings, or an `int32` or `int64` tensor
of shape `(batch_size, ...,)`.
Output shape:
An `int32` tensor of shape `(batch_size, ...)`.
Reference:
- [SipHash with salt](https://www.131002.net/siphash/siphash.pdf)
"""
def __init__(
self,
num_bins,
mask_value=None,
salt=None,
output_mode="int",
sparse=False,
**kwargs,
):
if not tf.available:
raise ImportError(
"Layer Hashing requires TensorFlow. "
"Install it via `pip install tensorflow`."
)
        # By default, output int64 when output_mode='int' and floats otherwise.
if "dtype" not in kwargs or kwargs["dtype"] is None:
kwargs["dtype"] = (
"int64" if output_mode == "int" else backend.floatx()
)
super().__init__(**kwargs)
if num_bins is None or num_bins <= 0:
raise ValueError(
"The `num_bins` for `Hashing` cannot be `None` or "
f"non-positive values. Received: num_bins={num_bins}."
)
if output_mode == "int" and not kwargs["dtype"] in ("int32", "int64"):
raise ValueError(
'When `output_mode="int"`, `dtype` should be an integer '
f"type, 'int32' or 'in64'. Received: dtype={kwargs['dtype']}"
)
# 'output_mode' must be one of (INT, ONE_HOT, MULTI_HOT, COUNT)
accepted_output_modes = ("int", "one_hot", "multi_hot", "count")
if output_mode not in accepted_output_modes:
raise ValueError(
"Invalid value for argument `output_mode`. "
f"Expected one of {accepted_output_modes}. "
f"Received: output_mode={output_mode}"
)
if sparse and output_mode == "int":
raise ValueError(
"`sparse` may only be true if `output_mode` is "
'`"one_hot"`, `"multi_hot"`, or `"count"`. '
f"Received: sparse={sparse} and "
f"output_mode={output_mode}"
)
self.num_bins = num_bins
self.mask_value = mask_value
self.strong_hash = True if salt is not None else False
self.output_mode = output_mode
self.sparse = sparse
self.salt = None
if salt is not None:
if isinstance(salt, (tuple, list)) and len(salt) == 2:
self.salt = list(salt)
elif isinstance(salt, int):
self.salt = [salt, salt]
else:
raise ValueError(
"The `salt` argument for `Hashing` can only be a tuple of "
"size 2 integers, or a single integer. "
f"Received: salt={salt}."
)
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.supports_jit = False
def call(self, inputs):
if not isinstance(
inputs, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)
):
inputs = tf.convert_to_tensor(backend.convert_to_numpy(inputs))
if isinstance(inputs, tf.SparseTensor):
indices = tf.SparseTensor(
indices=inputs.indices,
values=self._hash_values_to_bins(inputs.values),
dense_shape=inputs.dense_shape,
)
else:
indices = self._hash_values_to_bins(inputs)
outputs = tf_utils.encode_categorical_inputs(
indices,
output_mode=self.output_mode,
depth=self.num_bins,
sparse=self.sparse,
dtype=self.dtype,
)
if (
backend.backend() != "tensorflow"
and not backend_utils.in_tf_graph()
):
outputs = backend.convert_to_tensor(outputs)
return outputs
def _hash_values_to_bins(self, values):
"""Converts a non-sparse tensor of values to bin indices."""
hash_bins = self.num_bins
mask = None
# If mask_value is set, the zeroth bin is reserved for it.
if self.mask_value is not None and hash_bins > 1:
hash_bins -= 1
mask = tf.equal(values, self.mask_value)
# Convert all values to strings before hashing.
# Floats are first normalized to int64.
if values.dtype.is_floating:
values = tf.cast(values, dtype="int64")
if values.dtype != tf.string:
values = tf.as_string(values)
# Hash the strings.
if self.strong_hash:
values = tf.strings.to_hash_bucket_strong(
values, hash_bins, name="hash", key=self.salt
)
else:
values = tf.strings.to_hash_bucket_fast(
values, hash_bins, name="hash"
)
if mask is not None:
values = tf.add(values, tf.ones_like(values))
values = tf.where(mask, tf.zeros_like(values), values)
return values
def compute_output_spec(self, inputs):
if self.output_mode == "int":
return backend.KerasTensor(shape=inputs.shape, dtype=self.dtype)
if len(inputs.shape) >= 1:
base_shape = tuple(inputs.shape)[:-1]
else:
base_shape = ()
return backend.KerasTensor(
shape=base_shape + (self.num_bins,), dtype=self.dtype
)
def get_config(self):
config = super().get_config()
config.update(
{
"num_bins": self.num_bins,
"salt": self.salt,
"mask_value": self.mask_value,
"output_mode": self.output_mode,
"sparse": self.sparse,
}
)
return config
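# --- Usage sketch (not part of the module above; assumes TensorFlow is
# installed). The docstring recommends applying this layer in an input
# preprocessing pipeline; a minimal `tf.data` pattern hashing string ids
# into 3 bins:
import tensorflow as tf
import keras_core

hashing = keras_core.layers.Hashing(num_bins=3)
ds = tf.data.Dataset.from_tensor_slices([["A"], ["B"], ["C"], ["D"]])
ds = ds.map(hashing)  # each element becomes an int64 bin index in [0, 3)
for hashed in ds:
    print(hashed)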
# End of file: keras-core/keras_core/layers/preprocessing/hashing.py
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer
from keras_core.random.seed_generator import SeedGenerator
@keras_core_export("keras_core.layers.RandomRotation")
class RandomRotation(TFDataLayer):
"""A preprocessing layer which randomly rotates images during training.
This layer will apply random rotations to each image, filling empty space
according to `fill_mode`.
By default, random rotations are only applied during training.
At inference time, the layer does nothing. If you need to apply random
rotations at inference time, set `training` to True when calling the layer.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
By default, the layer will output floats.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
factor: a float represented as fraction of 2 Pi, or a tuple of size 2
representing lower and upper bound for rotating clockwise and
            counter-clockwise. A positive value means rotating
            counter-clockwise, while a negative value means rotating
            clockwise.
When represented as a single
float, this value is used for both the upper and lower bound.
For instance, `factor=(-0.2, 0.3)`
results in an output rotation by a random
amount in the range `[-20% * 2pi, 30% * 2pi]`.
`factor=0.2` results in an
output rotating by a random amount
in the range `[-20% * 2pi, 20% * 2pi]`.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode
(one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about
the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)`
The input is extended by
filling all values beyond the edge with
the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside
the boundaries when `fill_mode="constant"`.
"""
_FACTOR_VALIDATION_ERROR = (
"The `factor` argument should be a number (or a list of two numbers) "
"in the range [-1.0, 1.0]. "
)
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
_SUPPORTED_FILL_MODE = ("reflect", "wrap", "constant", "nearest")
_SUPPORTED_INTERPOLATION = ("nearest", "bilinear")
def __init__(
self,
factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
value_range=(0, 255),
data_format=None,
**kwargs,
):
super().__init__(**kwargs)
self.seed = seed
self.generator = SeedGenerator(seed)
self._set_factor(factor)
self._set_value_range(value_range)
self.data_format = backend.standardize_data_format(data_format)
self.fill_mode = fill_mode
self.interpolation = interpolation
self.fill_value = fill_value
self.supports_jit = False
if self.fill_mode not in self._SUPPORTED_FILL_MODE:
raise NotImplementedError(
f"Unknown `fill_mode` {fill_mode}. Expected of one "
f"{self._SUPPORTED_FILL_MODE}."
)
if self.interpolation not in self._SUPPORTED_INTERPOLATION:
raise NotImplementedError(
f"Unknown `interpolation` {interpolation}. Expected of one "
f"{self._SUPPORTED_INTERPOLATION}."
)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def _set_factor(self, factor):
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(
self._FACTOR_VALIDATION_ERROR + f"Received: factor={factor}"
)
self._check_factor_range(factor[0])
self._check_factor_range(factor[1])
self._factor = sorted(factor)
elif isinstance(factor, (int, float)):
self._check_factor_range(factor)
factor = abs(factor)
self._factor = [-factor, factor]
else:
raise ValueError(
self._FACTOR_VALIDATION_ERROR + f"Received: factor={factor}"
)
def _check_factor_range(self, input_number):
if input_number > 1.0 or input_number < -1.0:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: input_number={input_number}"
)
"""
Assume an angle ø, then rotation matrix is defined by
| cos(ø) -sin(ø) x_offset |
| sin(ø) cos(ø) y_offset |
| 0 0 1 |
This function is returning the 8 elements barring the final 1 as a 1D array
"""
def _get_rotation_matrix(self, inputs):
shape = self.backend.core.shape(inputs)
if len(shape) == 4:
if self.data_format == "channels_last":
batch_size = shape[0]
image_height = shape[1]
image_width = shape[2]
else:
batch_size = shape[1]
image_height = shape[2]
image_width = shape[3]
else:
batch_size = 1
if self.data_format == "channels_last":
image_height = shape[0]
image_width = shape[1]
else:
image_height = shape[1]
image_width = shape[2]
image_height = float(image_height)
image_width = float(image_width)
lower = self._factor[0] * 2.0 * self.backend.convert_to_tensor(np.pi)
upper = self._factor[1] * 2.0 * self.backend.convert_to_tensor(np.pi)
seed_generator = self._get_seed_generator(self.backend._backend)
angle = self.backend.random.uniform(
shape=(batch_size,),
minval=lower,
maxval=upper,
seed=seed_generator,
)
cos_theta = self.backend.numpy.cos(angle)
sin_theta = self.backend.numpy.sin(angle)
x_offset = (
(image_width - 1)
- (cos_theta * (image_width - 1) - sin_theta * (image_height - 1))
) / 2.0
y_offset = (
(image_height - 1)
- (sin_theta * (image_width - 1) + cos_theta * (image_height - 1))
) / 2.0
outputs = self.backend.numpy.concatenate(
[
self.backend.numpy.cos(angle)[:, None],
-self.backend.numpy.sin(angle)[:, None],
x_offset[:, None],
self.backend.numpy.sin(angle)[:, None],
self.backend.numpy.cos(angle)[:, None],
y_offset[:, None],
self.backend.numpy.zeros((batch_size, 2)),
],
axis=1,
)
if len(shape) == 3:
outputs = self.backend.numpy.squeeze(outputs, axis=0)
return outputs
def call(self, inputs, training=True):
inputs = self.backend.cast(inputs, self.compute_dtype)
if training:
rotation_matrix = self._get_rotation_matrix(inputs)
transformed_image = self.backend.image.affine_transform(
image=inputs,
transform=rotation_matrix,
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
data_format=self.data_format,
)
return transformed_image
else:
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"factor": self._factor,
"value_range": self.value_range,
"data_format": self.data_format,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
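# --- Usage sketch (not part of the module above). `factor=0.5` allows
# rotations up to half a turn in either direction; with `training=False`
# the layer passes inputs through unchanged (only cast to the compute dtype).
import numpy as np
import keras_core
from keras_core import backend

images = np.random.uniform(0, 255, size=(2, 8, 8, 3)).astype("float32")
layer = keras_core.layers.RandomRotation(factor=0.5, seed=42)
rotated = layer(images, training=True)
assert tuple(rotated.shape) == images.shape
passthrough = layer(images, training=False)
np.testing.assert_allclose(backend.convert_to_numpy(passthrough), images)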
# End of file: keras-core/keras_core/layers/preprocessing/random_rotation.py
from keras_core import regularizers
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, l1=0.0, l2=0.0, **kwargs):
super().__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {"l1": self.l1, "l2": self.l2}
return {**base_config, **config}
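# --- Usage sketch (not part of the module above). The layer is the identity
# on its inputs but registers an L1/L2 penalty on those activations, which
# shows up in `model.losses` after a call.
import numpy as np
import keras_core

inputs = keras_core.layers.Input(shape=(3,))
outputs = keras_core.layers.ActivityRegularization(l1=0.01, l2=0.0)(inputs)
model = keras_core.models.Model(inputs, outputs)

x = np.ones((2, 3), dtype="float32")
y = model(x)
# Output equals input; the activity penalty is tracked separately.
print([float(loss) for loss in model.losses])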
# End of file: keras-core/keras_core/layers/regularization/activity_regularization.py
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import layers
from keras_core import ops
from keras_core import testing
class Cropping3DTest(testing.TestCase, parameterized.TestCase):
@parameterized.product(
(
{"dim1_cropping": (1, 2), "dim1_expected": (1, 5)}, # both
{"dim1_cropping": (0, 2), "dim1_expected": (0, 5)}, # left only
{"dim1_cropping": (1, 0), "dim1_expected": (1, 7)}, # right only
),
(
{"dim2_cropping": (3, 4), "dim2_expected": (3, 5)}, # both
{"dim2_cropping": (0, 4), "dim2_expected": (0, 5)}, # left only
{"dim2_cropping": (3, 0), "dim2_expected": (3, 9)}, # right only
),
(
{"dim3_cropping": (5, 6), "dim3_expected": (5, 7)}, # both
{"dim3_cropping": (0, 6), "dim3_expected": (0, 7)}, # left only
{"dim3_cropping": (5, 0), "dim3_expected": (5, 13)}, # right only
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
@pytest.mark.requires_trainable_backend
def test_cropping_3d(
self,
dim1_cropping,
dim2_cropping,
dim3_cropping,
data_format,
dim1_expected,
dim2_expected,
dim3_expected,
):
if data_format == "channels_first":
inputs = np.random.rand(3, 5, 7, 9, 13)
expected_output = ops.convert_to_tensor(
inputs[
:,
:,
dim1_expected[0] : dim1_expected[1],
dim2_expected[0] : dim2_expected[1],
dim3_expected[0] : dim3_expected[1],
]
)
else:
inputs = np.random.rand(3, 7, 9, 13, 5)
expected_output = ops.convert_to_tensor(
inputs[
:,
dim1_expected[0] : dim1_expected[1],
dim2_expected[0] : dim2_expected[1],
dim3_expected[0] : dim3_expected[1],
:,
]
)
cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
self.run_layer_test(
layers.Cropping3D,
init_kwargs={"cropping": cropping, "data_format": data_format},
input_data=inputs,
expected_output=expected_output,
)
@parameterized.product(
(
# same cropping values with 3 tuples
{
"cropping": ((2, 2), (2, 2), (2, 2)),
"expected": ((2, 5), (2, 7), (2, 11)),
},
# same cropping values with 1 tuple
{"cropping": (2, 2, 2), "expected": ((2, 5), (2, 7), (2, 11))},
# same cropping values with an integer
{"cropping": 2, "expected": ((2, 5), (2, 7), (2, 11))},
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
@pytest.mark.requires_trainable_backend
def test_cropping_3d_with_same_cropping(
self, cropping, data_format, expected
):
if data_format == "channels_first":
inputs = np.random.rand(3, 5, 7, 9, 13)
expected_output = ops.convert_to_tensor(
inputs[
:,
:,
expected[0][0] : expected[0][1],
expected[1][0] : expected[1][1],
expected[2][0] : expected[2][1],
]
)
else:
inputs = np.random.rand(3, 7, 9, 13, 5)
expected_output = ops.convert_to_tensor(
inputs[
:,
expected[0][0] : expected[0][1],
expected[1][0] : expected[1][1],
expected[2][0] : expected[2][1],
:,
]
)
self.run_layer_test(
layers.Cropping3D,
init_kwargs={"cropping": cropping, "data_format": data_format},
input_data=inputs,
expected_output=expected_output,
)
def test_cropping_3d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, 7, None, 13, 5))
cropped = layers.Cropping3D(((1, 2), (3, 4), (5, 6)))(input_layer)
self.assertEqual(cropped.shape, (1, 4, None, 2, 5))
@parameterized.product(
(
{"cropping": ((3, 6), (0, 0), (0, 0))},
{"cropping": ((0, 0), (5, 8), (0, 0))},
{"cropping": ((0, 0), (0, 0), (7, 6))},
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_cropping_3d_errors_if_cropping_more_than_available(
self, cropping, data_format
):
input_layer = layers.Input(batch_shape=(3, 7, 9, 13, 5))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=cropping, data_format=data_format)(
input_layer
)
def test_cropping_3d_errors_if_cropping_argument_invalid(self):
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=(1,))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=(1, 2))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=(1, 2, 3, 4))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping="1")
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=((1, 2), (3, 4), (5, 6, 7)))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=((1, 2), (3, 4), (5, -6)))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=((1, 2), (3, 4), "5"))
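# --- Shape-arithmetic sketch (not part of the test module above). Each
# cropped spatial dimension shrinks by the sum of its two cropping values,
# e.g. (7, 9, 13) cropped by ((1, 2), (3, 4), (5, 6)) -> (4, 2, 2).
import numpy as np
import keras_core

x = np.random.rand(3, 7, 9, 13, 5).astype("float32")
out = keras_core.layers.Cropping3D(
    cropping=((1, 2), (3, 4), (5, 6)), data_format="channels_last"
)(x)
assert tuple(out.shape) == (3, 4, 2, 2, 5)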
# End of file: keras-core/keras_core/layers/reshaping/cropping3d_test.py
import numpy as np
from absl.testing import parameterized
from keras_core import layers
from keras_core import testing
class ZeroPadding1DTest(testing.TestCase, parameterized.TestCase):
def test_zero_padding_1d(self):
inputs = np.random.rand(1, 2, 3)
outputs = layers.ZeroPadding1D(padding=(1, 2))(inputs)
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, :], inputs)
@parameterized.named_parameters(("one_tuple", (2, 2)), ("one_int", 2))
def test_zero_padding_1d_with_same_padding(self, padding):
inputs = np.random.rand(1, 2, 3)
outputs = layers.ZeroPadding1D(padding=padding)(inputs)
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, :], inputs)
def test_zero_padding_1d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, None, 3))
padded = layers.ZeroPadding1D((1, 2))(input_layer)
self.assertEqual(padded.shape, (1, None, 3))
def test_zero_padding_1d_errors_if_padding_argument_invalid(self):
with self.assertRaises(ValueError):
layers.ZeroPadding1D(padding=(1,))
with self.assertRaises(ValueError):
layers.ZeroPadding1D(padding=(1, 2, 3))
with self.assertRaises(ValueError):
layers.ZeroPadding1D(padding="1")
# End of file: keras-core/keras_core/layers/reshaping/zero_padding1d_test.py
from keras_core import backend
from keras_core import ops
class DropoutRNNCell:
"""Object that holds dropout-related functionality for RNN cells.
    This class is not a standalone RNN cell. It is meant to be used with an
    RNN cell via multiple inheritance. Any cell that mixes in this class
    should have the following fields:
- `dropout`: a float number in the range `[0, 1]`.
Dropout rate for the input tensor.
- `recurrent_dropout`: a float number in the range `[0, 1]`.
Dropout rate for the recurrent connections.
- `seed_generator`, an instance of `backend.random.SeedGenerator`.
This object will create and cache dropout masks, and reuse them for
all incoming steps, so that the same mask is used for every step.
"""
def get_dropout_mask(self, step_input):
if not hasattr(self, "_dropout_mask"):
self._dropout_mask = None
if self._dropout_mask is None and self.dropout > 0:
ones = ops.ones_like(step_input)
self._dropout_mask = backend.random.dropout(
ones, rate=self.dropout, seed=self.seed_generator
)
return self._dropout_mask
def get_recurrent_dropout_mask(self, step_input):
if not hasattr(self, "_recurrent_dropout_mask"):
self._recurrent_dropout_mask = None
if self._recurrent_dropout_mask is None and self.recurrent_dropout > 0:
ones = ops.ones_like(step_input)
self._recurrent_dropout_mask = backend.random.dropout(
                ones, rate=self.recurrent_dropout, seed=self.seed_generator
)
return self._recurrent_dropout_mask
def reset_dropout_mask(self):
"""Reset the cached dropout mask if any.
The RNN layer invokes this in the `call()` method
so that the cached mask is cleared after calling `cell.call()`. The
mask should be cached across all timestep within the same batch, but
shouldn't be cached between batches.
"""
self._dropout_mask = None
def reset_recurrent_dropout_mask(self):
self._recurrent_dropout_mask = None
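# --- Mix-in sketch (hypothetical cell, not from this repo). It shows the
# multiple-inheritance pattern described in the docstring above: the cell
# defines `dropout`, `recurrent_dropout` and `seed_generator`, then reuses
# the cached masks inside `call()`.
from keras_core import backend, layers, ops
from keras_core.layers.rnn.dropout_rnn_cell import DropoutRNNCell


class MinimalDropoutCell(layers.Layer, DropoutRNNCell):
    def __init__(self, units, dropout=0.1, recurrent_dropout=0.1, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.state_size = units
        self.dropout = dropout
        self.recurrent_dropout = recurrent_dropout
        self.seed_generator = backend.random.SeedGenerator(seed=None)

    def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units))
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units)
        )

    def call(self, inputs, states, training=False):
        prev_state = states[0]
        if training:
            dp_mask = self.get_dropout_mask(inputs)
            rec_dp_mask = self.get_recurrent_dropout_mask(prev_state)
            if dp_mask is not None:
                inputs = inputs * dp_mask
            if rec_dp_mask is not None:
                prev_state = prev_state * rec_dp_mask
        h = ops.matmul(inputs, self.kernel) + ops.matmul(
            prev_state, self.recurrent_kernel
        )
        return h, [h]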
# End of file: keras-core/keras_core/layers/rnn/dropout_rnn_cell.py
"""Legacy Keras 1/2 layers.
AlphaDropout
RandomHeight
RandomWidth
ThresholdedReLU
"""
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
from keras_core.utils.module_utils import tensorflow as tf
@keras_core_export("keras_core._legacy.layers.AlphaDropout")
class AlphaDropout(Layer):
"""DEPRECATED."""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.rate > 0:
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
if self.noise_shape is None:
noise_shape = tf.shape(inputs)
else:
noise_shape = self.noise_shape
            kept_idx = tf.greater_equal(
                backend.random.uniform(
                    noise_shape, seed=self.seed_generator
                ),
                self.rate,
            )
kept_idx = tf.cast(kept_idx, inputs.dtype)
# Get affine transformation params
a = ((1 - self.rate) * (1 + self.rate * alpha_p**2)) ** -0.5
b = -a * alpha_p * self.rate
# Apply mask
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
# Do affine transformation
return a * x + b
return inputs
def get_config(self):
config = {"rate": self.rate, "seed": self.seed}
base_config = super().get_config()
return {**base_config, **config}
def compute_output_shape(self, input_shape):
return input_shape
@keras_core_export("keras_core._legacy.layers.RandomHeight")
class RandomHeight(Layer):
"""DEPRECATED."""
def __init__(self, factor, interpolation="bilinear", seed=None, **kwargs):
super().__init__(**kwargs)
self.seed_generator = backend.random.SeedGenerator(seed)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.height_lower = factor[0]
self.height_upper = factor[1]
else:
self.height_lower = -factor
self.height_upper = factor
if self.height_upper < self.height_lower:
raise ValueError(
"`factor` argument cannot have an upper bound lesser than the "
f"lower bound. Received: factor={factor}"
)
if self.height_lower < -1.0 or self.height_upper < -1.0:
raise ValueError(
"`factor` argument must have values larger than -1. "
f"Received: factor={factor}"
)
self.interpolation = interpolation
self.seed = seed
def call(self, inputs, training=True):
inputs = tf.convert_to_tensor(inputs, dtype=self.compute_dtype)
def random_height_inputs(inputs):
"""Inputs height-adjusted with random ops."""
inputs_shape = tf.shape(inputs)
img_hd = tf.cast(inputs_shape[-3], tf.float32)
img_wd = inputs_shape[-2]
height_factor = backend.random.uniform(
shape=[],
minval=(1.0 + self.height_lower),
maxval=(1.0 + self.height_upper),
seed=self.seed_generator,
)
adjusted_height = tf.cast(height_factor * img_hd, tf.int32)
adjusted_size = tf.stack([adjusted_height, img_wd])
output = tf.image.resize(
images=inputs,
size=adjusted_size,
method=self.interpolation,
)
# tf.resize will output float32 regardless of input type.
output = tf.cast(output, self.compute_dtype)
output_shape = inputs.shape.as_list()
output_shape[-3] = None
output.set_shape(output_shape)
return output
if training:
return random_height_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
input_shape[-3] = None
return tuple(input_shape)
def get_config(self):
config = {
"factor": self.factor,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
@keras_core_export("keras_core._legacy.layers.RandomWidth")
class RandomWidth(Layer):
"""DEPRECATED."""
def __init__(self, factor, interpolation="bilinear", seed=None, **kwargs):
super().__init__(**kwargs)
self.seed_generator = backend.random.SeedGenerator(seed)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.width_lower = factor[0]
self.width_upper = factor[1]
else:
self.width_lower = -factor
self.width_upper = factor
if self.width_upper < self.width_lower:
raise ValueError(
"`factor` argument cannot have an upper bound less than the "
f"lower bound. Received: factor={factor}"
)
if self.width_lower < -1.0 or self.width_upper < -1.0:
raise ValueError(
"`factor` argument must have values larger than -1. "
f"Received: factor={factor}"
)
self.interpolation = interpolation
self.seed = seed
def call(self, inputs, training=True):
inputs = tf.convert_to_tensor(inputs, dtype=self.compute_dtype)
def random_width_inputs(inputs):
"""Inputs width-adjusted with random ops."""
inputs_shape = tf.shape(inputs)
img_hd = inputs_shape[-3]
img_wd = tf.cast(inputs_shape[-2], tf.float32)
width_factor = backend.random.uniform(
shape=[],
minval=(1.0 + self.width_lower),
maxval=(1.0 + self.width_upper),
seed=self.seed_generator,
)
adjusted_width = tf.cast(width_factor * img_wd, tf.int32)
adjusted_size = tf.stack([img_hd, adjusted_width])
output = tf.image.resize(
images=inputs,
size=adjusted_size,
method=self.interpolation,
)
# tf.resize will output float32 regardless of input type.
output = tf.cast(output, self.compute_dtype)
output_shape = inputs.shape.as_list()
output_shape[-2] = None
output.set_shape(output_shape)
return output
if training:
return random_width_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
input_shape[-2] = None
return tuple(input_shape)
def get_config(self):
config = {
"factor": self.factor,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
@keras_core_export("keras_core._legacy.layers.ThresholdedReLU")
class ThresholdedReLU(Layer):
"""DEPRECATED."""
def __init__(self, theta=1.0, **kwargs):
super().__init__(**kwargs)
if theta is None:
raise ValueError(
"Theta of a Thresholded ReLU layer cannot be None, expecting a "
f"float. Received: {theta}"
)
if theta < 0:
raise ValueError(
"The theta value of a Thresholded ReLU layer "
f"should be >=0. Received: {theta}"
)
self.supports_masking = True
self.theta = tf.convert_to_tensor(theta, dtype=self.compute_dtype)
def call(self, inputs):
dtype = self.compute_dtype
return inputs * tf.cast(tf.greater(inputs, self.theta), dtype)
def get_config(self):
config = {"theta": float(self.theta)}
base_config = super().get_config()
return {**base_config, **config}
def compute_output_shape(self, input_shape):
return input_shape
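# --- Usage sketch (not part of the module above; these legacy layers call
# into TensorFlow directly, so the TensorFlow backend is assumed). The
# threshold comparison is strict: values equal to `theta` are zeroed too.
import numpy as np
from keras_core.legacy.layers import ThresholdedReLU

x = np.array([[-1.0, 0.5, 1.0, 2.0]], dtype="float32")
layer = ThresholdedReLU(theta=1.0)
print(np.asarray(layer(x)))  # [[0. 0. 0. 2.]]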
# End of file: keras-core/keras_core/legacy/layers.py
import numpy as np
import pytest
from keras_core import backend
from keras_core import losses as losses_module
from keras_core import ops
from keras_core import testing
from keras_core.losses.loss import Loss
class ExampleLoss(Loss):
def call(self, y_true, y_pred):
return (y_true - y_pred) ** 2
class LossTest(testing.TestCase):
def test_reduction(self):
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
# No reduction
loss_fn = ExampleLoss(reduction=None)
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose((y_true - y_pred) ** 2, loss)
# sum
loss_fn = ExampleLoss(reduction="sum")
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(np.sum((y_true - y_pred) ** 2), loss)
# sum_over_batch_size
loss_fn = ExampleLoss(reduction="sum_over_batch_size")
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(np.sum((y_true - y_pred) ** 2) / 4, loss)
# bad reduction
with self.assertRaisesRegex(ValueError, "Invalid value for argument"):
ExampleLoss(reduction="abc")
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_mask(self):
mask = np.array([True, False, True, True])
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
masked_y_true = np.array([1.0, 1.0, 0.0])
masked_y_pred = np.array([0.1, 0.3, 0.4])
mask = ops.convert_to_tensor(mask)
y_true = ops.convert_to_tensor(y_true)
y_pred = ops.convert_to_tensor(y_pred)
y_pred._keras_mask = mask
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(
np.sum((masked_y_true - masked_y_pred) ** 2) / 3, loss
)
# Test edge case where everything is masked.
mask = np.array([False, False, False, False])
y_pred._keras_mask = mask
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(loss, 0) # No NaN.
def test_sample_weight(self):
sample_weight = np.array([0.4, 0.3, 0.2, 0.1])
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(
np.sum(sample_weight * (y_true - y_pred) ** 2) / 4, loss
)
# Test edge case where every weight is 0.
sample_weight = np.array([0.0, 0.0, 0.0, 0.0])
loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(loss, 0) # No NaN.
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_mask_and_sample_weight(self):
sample_weight = np.array([0.4, 0.3, 0.2, 0.1])
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
mask = np.array([True, False, True, True])
masked_sample_weight = np.array([0.4, 0.2, 0.1])
masked_y_true = np.array([1.0, 1.0, 0.0])
masked_y_pred = np.array([0.1, 0.3, 0.4])
mask = ops.convert_to_tensor(mask)
y_true = ops.convert_to_tensor(y_true)
y_pred = ops.convert_to_tensor(y_pred)
y_pred._keras_mask = mask
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(
np.sum(masked_sample_weight * (masked_y_true - masked_y_pred) ** 2)
/ 3,
loss,
)
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_mask_and_sample_weight_rank2(self):
# check loss of inputs with duplicate rows doesn't change
sample_weight = np.array([0.4, 0.3, 0.2, 0.1])
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
mask = np.array([True, False, True, True])
mask = ops.convert_to_tensor(mask)
y_true = ops.convert_to_tensor(y_true)
y_pred = ops.convert_to_tensor(y_pred)
y_pred._keras_mask = mask
loss_fn = ExampleLoss()
rank1_loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
# duplicate rows
mask = ops.tile(ops.expand_dims(mask, axis=0), (2, 1))
y_true = ops.tile(ops.expand_dims(y_true, axis=0), (2, 1))
y_pred = ops.tile(ops.expand_dims(y_pred, axis=0), (2, 1))
sample_weight = ops.tile(ops.expand_dims(sample_weight, axis=0), (2, 1))
y_pred._keras_mask = mask
rank2_loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(rank1_loss, rank2_loss)
# @testing.parametrize(
# "uprank", ["mask", "sample_weight", "y_true", "y_pred"])
# TODO: use parameterization decorator
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_rank_adjustment(self):
for uprank in ["mask", "sample_weight", "ys"]:
sample_weight = np.array([0.4, 0.3, 0.2, 0.1])
y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
mask = np.array([True, False, True, True])
if uprank == "mask":
mask = np.expand_dims(mask, -1)
elif uprank == "sample_weight":
sample_weight = np.expand_dims(sample_weight, -1)
elif uprank == "ys":
y_true = np.expand_dims(y_true, -1)
y_pred = np.expand_dims(y_pred, -1)
masked_sample_weight = np.array([0.4, 0.2, 0.1])
masked_y_true = np.array([1.0, 1.0, 0.0])
masked_y_pred = np.array([0.1, 0.3, 0.4])
mask = ops.convert_to_tensor(mask)
y_true = ops.convert_to_tensor(y_true)
y_pred = ops.convert_to_tensor(y_pred)
y_pred._keras_mask = mask
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(
np.sum(
masked_sample_weight * (masked_y_true - masked_y_pred) ** 2
)
/ 3,
loss,
)
def test_mixed_dtypes(self):
sample_weight = np.array([0.4, 0.3, 0.2, 0.1], dtype="float64")
y_true = np.array([1.0, 0.0, 1.0, 0.0], dtype="int32")
y_pred = np.array([0.1, 0.2, 0.3, 0.4], dtype="float32")
loss_fn = ExampleLoss()
loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float32")
self.assertAllClose(
np.sum(sample_weight * (y_true - y_pred) ** 2) / 4,
loss,
)
def test_get_method(self):
loss = losses_module.get("mse")
self.assertEqual(loss, losses_module.mean_squared_error)
loss = losses_module.get(None)
self.assertEqual(loss, None)
with self.assertRaises(ValueError):
losses_module.get("typo")
def test_dtype_arg(self):
y_true = np.array([1.0, 0.0, 1.0, 0.0], dtype="float32")
y_pred = np.array([0.1, 0.2, 0.3, 0.4], dtype="float32")
# Note: we use float16 and not float64 to test this because
# JAX will map float64 to float32.
loss_fn = ExampleLoss(dtype="float16")
loss = loss_fn(y_true, y_pred)
self.assertEqual(backend.standardize_dtype(loss.dtype), "float16")
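# --- Worked example (standalone NumPy) of the default "sum_over_batch_size"
# reduction exercised in the tests above: the weighted squared errors are
# summed and divided by the number of elements, not by the sum of the weights.
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.2, 0.3, 0.4])
sample_weight = np.array([0.4, 0.3, 0.2, 0.1])
expected = np.sum(sample_weight * (y_true - y_pred) ** 2) / 4
print(expected)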
# End of file: keras-core/keras_core/losses/loss_test.py
from enum import Enum
import numpy as np
from keras_core import backend
from keras_core import ops
from keras_core.losses.loss import squeeze_to_same_rank
from keras_core.utils.python_utils import to_list
NEG_INF = -1e10
def assert_thresholds_range(thresholds):
if thresholds is not None:
invalid_thresholds = [
t for t in thresholds if t is None or t < 0 or t > 1
]
if invalid_thresholds:
raise ValueError(
"Threshold values must be in [0, 1]. "
f"Received: {invalid_thresholds}"
)
def parse_init_thresholds(thresholds, default_threshold=0.5):
if thresholds is not None:
assert_thresholds_range(to_list(thresholds))
thresholds = to_list(
default_threshold if thresholds is None else thresholds
)
return thresholds
class ConfusionMatrix(Enum):
TRUE_POSITIVES = "tp"
FALSE_POSITIVES = "fp"
TRUE_NEGATIVES = "tn"
FALSE_NEGATIVES = "fn"
class AUCCurve(Enum):
"""Type of AUC Curve (ROC or PR)."""
ROC = "ROC"
PR = "PR"
@staticmethod
def from_str(key):
if key in ("pr", "PR"):
return AUCCurve.PR
elif key in ("roc", "ROC"):
return AUCCurve.ROC
else:
raise ValueError(
f'Invalid AUC curve value: "{key}". '
'Expected values are ["PR", "ROC"]'
)
class AUCSummationMethod(Enum):
"""Type of AUC summation method.
    https://en.wikipedia.org/wiki/Riemann_sum
Contains the following values:
* 'interpolation': Applies mid-point summation scheme for `ROC` curve. For
`PR` curve, interpolates (true/false) positives but not the ratio that is
precision (see Davis & Goadrich 2006 for details).
* 'minoring': Applies left summation for increasing intervals and right
summation for decreasing intervals.
* 'majoring': Applies right summation for increasing intervals and left
summation for decreasing intervals.
"""
INTERPOLATION = "interpolation"
MAJORING = "majoring"
MINORING = "minoring"
@staticmethod
def from_str(key):
if key in ("interpolation", "Interpolation"):
return AUCSummationMethod.INTERPOLATION
elif key in ("majoring", "Majoring"):
return AUCSummationMethod.MAJORING
elif key in ("minoring", "Minoring"):
return AUCSummationMethod.MINORING
else:
raise ValueError(
f'Invalid AUC summation method value: "{key}". '
'Expected values are ["interpolation", "majoring", "minoring"]'
)
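# --- Standalone NumPy sketch (not part of this module) of the bucketing +
# reversed-cumsum trick that `_update_confusion_matrix_variables_optimized`
# below describes in its docstring, using the same example values.
import numpy as np

y_true = np.array([0.0, 0.0, 1.0, 1.0])
y_pred = np.array([0.1, 0.5, 0.3, 0.9])
num_thresholds = 3  # thresholds = [0.0, 0.5, 1.0]

# ceil(p * (T - 1)) - 1 assigns each prediction to a half-open bucket; the
# clamp to 0 mirrors the relu() used when thresholds carry an epsilon.
bucket = np.maximum(
    np.ceil(y_pred * (num_thresholds - 1)) - 1, 0
).astype(int)
tp_bucket = np.bincount(bucket, weights=y_true, minlength=num_thresholds)
fp_bucket = np.bincount(bucket, weights=1.0 - y_true, minlength=num_thresholds)

# Reversed cumulative sum: predictions in higher buckets also exceed all
# lower thresholds.
tp = np.cumsum(tp_bucket[::-1])[::-1]
fp = np.cumsum(fp_bucket[::-1])[::-1]
print(tp)  # [2. 1. 0.]
print(fp)  # [2. 0. 0.]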
def _update_confusion_matrix_variables_optimized(
variables_to_update,
y_true,
y_pred,
thresholds,
multi_label=False,
sample_weights=None,
label_weights=None,
thresholds_with_epsilon=False,
):
"""Update confusion matrix variables with memory efficient alternative.
    Note that the thresholds need to be evenly distributed within the list,
    i.e., the difference between consecutive elements is the same.
To compute TP/FP/TN/FN, we are measuring a binary classifier
C(t) = (predictions >= t)
at each threshold 't'. So we have
TP(t) = sum( C(t) * true_labels )
FP(t) = sum( C(t) * false_labels )
But, computing C(t) requires computation for each t. To make it fast,
observe that C(t) is a cumulative integral, and so if we have
thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
where n = num_thresholds, and if we can compute the bucket function
B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
then we get
C(t_i) = sum( B(j), j >= i )
which is the reversed cumulative sum in ops.cumsum().
We can compute B(i) efficiently by taking advantage of the fact that
our thresholds are evenly distributed, in that
width = 1.0 / (num_thresholds - 1)
thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
Given a prediction value p, we can map it to its bucket by
bucket_index(p) = floor( p * (num_thresholds - 1) )
so we can use ops.segment_sum() to update the buckets in one pass.
Consider following example:
y_true = [0, 0, 1, 1]
y_pred = [0.1, 0.5, 0.3, 0.9]
thresholds = [0.0, 0.5, 1.0]
num_buckets = 2 # [0.0, 1.0], (1.0, 2.0]
bucket_index(y_pred) = ops.floor(y_pred * num_buckets)
= ops.floor([0.2, 1.0, 0.6, 1.8])
= [0, 0, 0, 1]
# The meaning of this bucket is that if any of the label is true,
# then 1 will be added to the corresponding bucket with the index.
# Eg, if the label for 0.2 is true, then 1 will be added to bucket 0. If the
# label for 1.8 is true, then 1 will be added to bucket 1.
#
    # Note the second item "1.0" is floored to 0, since the value needs to be
# strictly larger than the bucket lower bound.
# In the implementation, we use ops.ceil() - 1 to achieve this.
tp_bucket_value = ops.segment_sum(true_labels, bucket_indices,
num_segments=num_thresholds)
= [1, 1, 0]
# For [1, 1, 0] here, it means there is 1 true value contributed by bucket
# 0, and 1 value contributed by bucket 1. When we aggregate them to
# together, the result become [a + b + c, b + c, c], since large thresholds
# will always contribute to the value for smaller thresholds.
true_positive = ops.cumsum(tp_bucket_value, reverse=True)
= [2, 1, 0]
This implementation exhibits a run time and space complexity of O(T + N),
where T is the number of thresholds and N is the size of predictions.
Metrics that rely on standard implementation instead exhibit a complexity of
O(T * N).
Args:
variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid
keys and corresponding variables to update as values.
y_true: A floating point `Tensor` whose shape matches `y_pred`. Will be
cast to `bool`.
y_pred: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
        thresholds: A sorted floating point `Tensor` with values in `[0, 1]`.
            It needs to be evenly distributed (the difference between
            consecutive elements must be the same).
multi_label: Optional boolean indicating whether multidimensional
prediction/labels should be treated as multilabel responses, or
            flattened into a single label. When True, the values of
`variables_to_update` must have a second dimension equal to the
number of labels in y_true and y_pred, and those tensors must not be
RaggedTensors.
sample_weights: Optional `Tensor` whose rank is either 0, or the same
rank as `y_true`, and must be broadcastable to `y_true` (i.e., all
dimensions must be either `1`, or the same as the corresponding
`y_true` dimension).
label_weights: Optional tensor of non-negative weights for multilabel
data. The weights are applied when calculating TP, FP, FN, and TN
without explicit multilabel handling (i.e. when the data is to be
flattened).
thresholds_with_epsilon: Optional boolean indicating whether the leading
and trailing thresholds have an epsilon added for floating point
imprecision. It changes how the leading and trailing buckets are
handled.
"""
num_thresholds = ops.shape(thresholds)[0]
if sample_weights is None:
sample_weights = 1.0
else:
sample_weights = ops.broadcast_to(
ops.cast(sample_weights, dtype=y_pred.dtype), ops.shape(y_pred)
)
if not multi_label:
sample_weights = ops.reshape(sample_weights, [-1])
if label_weights is None:
label_weights = 1.0
else:
label_weights = ops.expand_dims(label_weights, 0)
label_weights = ops.broadcast_to(label_weights, ops.shape(y_pred))
if not multi_label:
label_weights = ops.reshape(label_weights, [-1])
weights = ops.cast(
ops.multiply(sample_weights, label_weights), y_true.dtype
)
# We shouldn't need this, but in case there are prediction values that are
# outside the range [0.0, 1.0].
y_pred = ops.clip(y_pred, x_min=0.0, x_max=1.0)
y_true = ops.cast(ops.cast(y_true, "bool"), y_true.dtype)
if not multi_label:
y_true = ops.reshape(y_true, [-1])
y_pred = ops.reshape(y_pred, [-1])
true_labels = ops.multiply(y_true, weights)
false_labels = ops.multiply((1.0 - y_true), weights)
# Compute the bucket indices for each prediction value.
# Since a prediction value has to be strictly greater than the threshold,
# e.g. with buckets [0, 0.5], (0.5, 1], the value 0.5 belongs to the first
# bucket. We therefore use ceil(val) - 1 to compute the bucket index.
bucket_indices = (
ops.ceil(y_pred * (ops.cast(num_thresholds, dtype=y_pred.dtype) - 1))
- 1
)
if thresholds_with_epsilon:
# In this case, the first bucket should actually be taken into account,
# since any prediction in [0.0, 1.0] should be larger than the first
# threshold. We change the bucket index from -1 to 0.
bucket_indices = ops.relu(bucket_indices)
bucket_indices = ops.cast(bucket_indices, "int32")
if multi_label:
# We need to run the bucket segment sum for each label class. In the
# multi_label case, the rank of the labels is 2. We first transpose them so
# that the label dim becomes the first axis and we can run over the classes
# in parallel.
true_labels = ops.transpose(true_labels)
false_labels = ops.transpose(false_labels)
bucket_indices = ops.transpose(bucket_indices)
def gather_bucket(label_and_bucket_index):
label, bucket_index = (
label_and_bucket_index[0],
label_and_bucket_index[1],
)
return ops.segment_sum(
data=label,
segment_ids=bucket_index,
num_segments=num_thresholds,
)
tp_bucket_v = backend.vectorized_map(
gather_bucket,
(true_labels, bucket_indices),
)
fp_bucket_v = backend.vectorized_map(
gather_bucket, (false_labels, bucket_indices)
)
tp = ops.transpose(ops.flip(ops.cumsum(ops.flip(tp_bucket_v), axis=1)))
fp = ops.transpose(ops.flip(ops.cumsum(ops.flip(fp_bucket_v), axis=1)))
else:
tp_bucket_v = ops.segment_sum(
data=true_labels,
segment_ids=bucket_indices,
num_segments=num_thresholds,
)
fp_bucket_v = ops.segment_sum(
data=false_labels,
segment_ids=bucket_indices,
num_segments=num_thresholds,
)
tp = ops.flip(ops.cumsum(ops.flip(tp_bucket_v)))
fp = ops.flip(ops.cumsum(ops.flip(fp_bucket_v)))
# fn = sum(true_labels) - tp
# tn = sum(false_labels) - fp
if (
ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
or ConfusionMatrix.FALSE_NEGATIVES in variables_to_update
):
if multi_label:
total_true_labels = ops.sum(true_labels, axis=1)
total_false_labels = ops.sum(false_labels, axis=1)
else:
total_true_labels = ops.sum(true_labels)
total_false_labels = ops.sum(false_labels)
if ConfusionMatrix.TRUE_POSITIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.TRUE_POSITIVES]
variable.assign(variable + tp)
if ConfusionMatrix.FALSE_POSITIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.FALSE_POSITIVES]
variable.assign(variable + fp)
if ConfusionMatrix.TRUE_NEGATIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.TRUE_NEGATIVES]
tn = total_false_labels - fp
variable.assign(variable + tn)
if ConfusionMatrix.FALSE_NEGATIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.FALSE_NEGATIVES]
fn = total_true_labels - tp
variable.assign(variable + fn)
def is_evenly_distributed_thresholds(thresholds):
"""Check if the thresholds list is evenly distributed.
We could leverage evenly distributed thresholds to use less memory when
calculating metrics like AUC, where each individual threshold needs to be
evaluated.
Args:
thresholds: A python list or tuple, or 1D numpy array whose values are
in the range [0, 1].
Returns:
boolean, whether the values in the inputs are evenly distributed.
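Example (illustrative only):
is_evenly_distributed_thresholds([0.0, 0.5, 1.0]) # True
is_evenly_distributed_thresholds([0.0, 0.3, 1.0]) # False
is_evenly_distributed_thresholds([0.0, 1.0]) # False (fewer than 3 values)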
"""
# Check the list value and see if it is evenly distributed.
num_thresholds = len(thresholds)
if num_thresholds < 3:
return False
even_thresholds = np.arange(num_thresholds, dtype=np.float32) / (
num_thresholds - 1
)
return np.allclose(thresholds, even_thresholds, atol=backend.epsilon())
def update_confusion_matrix_variables(
variables_to_update,
y_true,
y_pred,
thresholds,
top_k=None,
class_id=None,
sample_weight=None,
multi_label=False,
label_weights=None,
thresholds_distributed_evenly=False,
):
"""Updates the given confusion matrix variables.
For every pair of values in y_true and y_pred:
true_positives: y_true == True and y_pred > thresholds
false_negatives: y_true == True and y_pred <= thresholds
true_negatives: y_true == False and y_pred <= thresholds
false_positives: y_true == False and y_pred > thresholds
The results will be weighted and added together. When multiple thresholds
are provided, we will repeat the same for every threshold.
For estimation of these metrics over a stream of data, the function creates
an `update_op` operation that updates the given variables.
If `sample_weight` is `None`, weights default to 1.
Use weights of 0 to mask values.
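As a small hand-worked illustration, with a single threshold of 0.5,
y_true = [1, 1, 0, 0] and y_pred = [0.9, 0.3, 0.8, 0.1] yield
tp = 1 (from 0.9), fn = 1 (from 0.3), fp = 1 (from 0.8) and
tn = 1 (from 0.1).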
Args:
variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
and corresponding variables to update as values.
y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`.
y_pred: A floating point `Tensor` of arbitrary shape and whose values are
in the range `[0, 1]`.
thresholds: A float value, float tensor, python list, or tuple of float
thresholds in `[0, 1]`, or NEG_INF (used when top_k is set).
top_k: Optional int, indicates that the positive labels should be limited
to the top k predictions.
class_id: Optional int, limits the prediction and labels to the class
specified by this argument.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
as `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions
must be either `1`, or the same as the corresponding `y_true`
dimension).
multi_label: Optional boolean indicating whether multidimensional
prediction/labels should be treated as multilabel responses, or
flattened into a single label. When True, the values of
`variables_to_update` must have a second dimension equal to the number
of labels in y_true and y_pred, and those tensors must not be
RaggedTensors.
label_weights: (optional) tensor of non-negative weights for multilabel
data. The weights are applied when calculating TP, FP, FN, and TN
without explicit multilabel handling (i.e. when the data is to be
flattened).
thresholds_distributed_evenly: Boolean, whether the thresholds are evenly
distributed within the list. An optimized method will be used if this is
the case. See _update_confusion_matrix_variables_optimized() for more
details.
Raises:
ValueError: If `y_pred` and `y_true` have mismatched shapes, or if
`sample_weight` is not `None` and its shape doesn't match `y_pred`, or
if `variables_to_update` contains invalid keys.
"""
if multi_label and label_weights is not None:
raise ValueError(
"`label_weights` for multilabel data should be handled "
"outside of `update_confusion_matrix_variables` when "
"`multi_label` is True."
)
if variables_to_update is None:
return
if not any(
key for key in variables_to_update if key in list(ConfusionMatrix)
):
raise ValueError(
"Please provide at least one valid confusion matrix "
"variable to update. Valid variable key options are: "
f'"{list(ConfusionMatrix)}". '
f'Received: "{variables_to_update.keys()}"'
)
variable_dtype = list(variables_to_update.values())[0].dtype
y_true = ops.cast(y_true, dtype=variable_dtype)
y_pred = ops.cast(y_pred, dtype=variable_dtype)
if thresholds_distributed_evenly:
# Check whether the thresholds have a leading or trailing epsilon added
# for floating point imprecision. The leading and trailing thresholds will
# be handled a bit differently, as corner cases. At this point, thresholds
# should be a list/array with more than 2 items, with values in [0, 1].
# See is_evenly_distributed_thresholds() for more details.
thresholds_with_epsilon = thresholds[0] < 0.0 or thresholds[-1] > 1.0
thresholds = ops.convert_to_tensor(thresholds, dtype=variable_dtype)
num_thresholds = ops.shape(thresholds)[0]
if multi_label:
one_thresh = ops.equal(
np.array(1, dtype="int32"),
len(thresholds.shape),
)
else:
one_thresh = np.array(True, dtype="bool")
invalid_keys = [
key for key in variables_to_update if key not in list(ConfusionMatrix)
]
if invalid_keys:
raise ValueError(
f'Invalid keys: "{invalid_keys}". '
f'Valid variable key options are: "{list(ConfusionMatrix)}"'
)
y_pred, y_true = squeeze_to_same_rank(y_pred, y_true)
if sample_weight is not None:
sample_weight = ops.expand_dims(
ops.cast(sample_weight, dtype=variable_dtype), axis=-1
)
_, sample_weight = squeeze_to_same_rank(y_true, sample_weight)
if top_k is not None:
y_pred = _filter_top_k(y_pred, top_k)
if class_id is not None:
if len(y_pred.shape) == 1:
raise ValueError(
"When class_id is provided, y_pred must be a 2D array "
"with shape (num_samples, num_classes), found shape: "
f"{y_pred.shape}"
)
# Preserve dimension to match with sample_weight
y_true = y_true[..., class_id, None]
y_pred = y_pred[..., class_id, None]
if thresholds_distributed_evenly:
return _update_confusion_matrix_variables_optimized(
variables_to_update,
y_true,
y_pred,
thresholds,
multi_label=multi_label,
sample_weights=sample_weight,
label_weights=label_weights,
thresholds_with_epsilon=thresholds_with_epsilon,
)
if None in y_pred.shape:
pred_shape = ops.shape(y_pred)
num_predictions = pred_shape[0]
if len(y_pred.shape) == 1:
num_labels = 1
else:
num_labels = ops.cast(
ops.prod(ops.array(pred_shape[1:]), axis=0), "int32"
)
thresh_label_tile = ops.where(one_thresh, num_labels, 1)
else:
pred_shape = y_pred.shape
num_predictions = pred_shape[0]
if len(y_pred.shape) == 1:
num_labels = 1
else:
num_labels = np.prod(pred_shape[1:], axis=0).astype("int32")
thresh_label_tile = np.where(one_thresh, num_labels, 1)
# Reshape predictions and labels, adding a dim for thresholding.
if multi_label:
predictions_extra_dim = ops.expand_dims(y_pred, 0)
labels_extra_dim = ops.expand_dims(ops.cast(y_true, dtype="bool"), 0)
else:
# Flatten predictions and labels when not multilabel.
predictions_extra_dim = ops.reshape(y_pred, [1, -1])
labels_extra_dim = ops.reshape(ops.cast(y_true, dtype="bool"), [1, -1])
# Tile the thresholds for every prediction.
if multi_label:
thresh_pretile_shape = [num_thresholds, 1, -1]
thresh_tiles = [1, num_predictions, thresh_label_tile]
data_tiles = [num_thresholds, 1, 1]
else:
thresh_pretile_shape = [num_thresholds, -1]
thresh_tiles = [1, num_predictions * num_labels]
data_tiles = [num_thresholds, 1]
thresh_tiled = ops.tile(
ops.reshape(thresholds, thresh_pretile_shape), thresh_tiles
)
# Tile the predictions for every threshold.
preds_tiled = ops.tile(predictions_extra_dim, data_tiles)
# Compare predictions and threshold.
pred_is_pos = ops.greater(preds_tiled, thresh_tiled)
# Tile labels by number of thresholds
label_is_pos = ops.tile(labels_extra_dim, data_tiles)
if sample_weight is not None:
sample_weight = ops.broadcast_to(
ops.cast(sample_weight, dtype=y_pred.dtype), y_pred.shape
)
weights_tiled = ops.tile(
ops.reshape(sample_weight, thresh_tiles), data_tiles
)
else:
weights_tiled = None
if label_weights is not None and not multi_label:
label_weights = ops.expand_dims(label_weights, 0)
label_weights = ops.broadcast_to(label_weights, ops.shape(y_pred))
label_weights_tiled = ops.tile(
ops.reshape(label_weights, thresh_tiles), data_tiles
)
if weights_tiled is None:
weights_tiled = label_weights_tiled
else:
weights_tiled = ops.multiply(weights_tiled, label_weights_tiled)
def weighted_assign_add(label, pred, weights, var):
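# For every threshold row, count how often `label` and `pred` are both True
# (optionally weighted), then add the per-threshold totals to `var`.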
label_and_pred = ops.cast(ops.logical_and(label, pred), dtype=var.dtype)
if weights is not None:
label_and_pred *= ops.cast(weights, dtype=var.dtype)
var.assign(var + ops.sum(label_and_pred, 1))
loop_vars = {
ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos),
}
update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update
update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update
if update_fn or update_tn:
pred_is_neg = ops.logical_not(pred_is_pos)
loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg)
if update_fp or update_tn:
label_is_neg = ops.logical_not(label_is_pos)
loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos)
if update_tn:
loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (
label_is_neg,
pred_is_neg,
)
for matrix_cond, (label, pred) in loop_vars.items():
if matrix_cond in variables_to_update:
weighted_assign_add(
label, pred, weights_tiled, variables_to_update[matrix_cond]
)
def _filter_top_k(x, k):
"""Filters top-k values in the last dim of x and set the rest to NEG_INF.
Used for computing top-k prediction values in dense labels (which has the
same shape as predictions) for recall and precision top-k metrics.
Args:
x: tensor with any dimensions.
k: the number of values to keep.
Returns:
tensor with same shape and dtype as x.
"""
_, top_k_idx = ops.top_k(x, k)
top_k_mask = ops.sum(
ops.one_hot(top_k_idx, ops.shape(x)[-1], axis=-1), axis=-2
)
return x * top_k_mask + NEG_INF * (1 - top_k_mask)
def confusion_matrix(
labels,
predictions,
num_classes=None,
weights=None,
dtype="int32",
):
"""Computes the confusion matrix from predictions and labels.
The matrix columns represent the prediction labels and the rows represent
the real labels. The confusion matrix is always a 2-D array of shape
`(n, n)`, where `n` is the number of valid labels for a given classification
task. Both predictions and labels must be 1-D arrays of the same shape in
order for this function to work.
If `num_classes` is `None`, then `num_classes` will be set to one plus the
maximum value in either predictions or labels. Class labels are expected to
start at 0. For example, if `num_classes` is 3, then the possible labels
would be `[0, 1, 2]`.
If `weights` is not `None`, then each prediction contributes its
corresponding weight to the total value of the confusion matrix cell.
For example:
```python
keras_core.metrics.metrics_utils.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
[[0 0 0 0 0]
[0 0 1 0 0]
[0 0 1 0 0]
[0 0 0 0 0]
[0 0 0 0 1]]
```
Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
resulting in a 5x5 confusion matrix.
Args:
labels: 1-D tensor of real labels for the classification task.
predictions: 1-D tensor of predictions for a given classification.
num_classes: The possible number of labels the classification task can
have. If this value is not provided, it will be calculated
using both the predictions and labels arrays.
weights: An optional tensor whose shape matches `predictions`.
dtype: Data type of the confusion matrix.
Returns:
A tensor of type `dtype` with shape `(n, n)` representing the confusion
matrix, where `n` is the number of possible labels in the classification
task.
"""
labels = ops.convert_to_tensor(labels, dtype)
predictions = ops.convert_to_tensor(predictions, dtype)
labels, predictions = squeeze_to_same_rank(labels, predictions)
predictions = ops.cast(predictions, dtype)
labels = ops.cast(labels, dtype)
if num_classes is None:
num_classes = ops.maximum(ops.max(predictions), ops.max(labels)) + 1
else:
num_classes = ops.cast(num_classes, dtype)
if weights is not None:
weights = ops.convert_to_tensor(weights, dtype)
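# Each (label, prediction) pair addresses one cell of the matrix; scattering
# the (possibly weighted) values accumulates counts for repeated pairs.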
indices = ops.stack([labels, predictions], axis=1)
values = ops.ones_like(predictions, dtype) if weights is None else weights
indices = ops.cast(indices, dtype="int64")
values = ops.cast(values, dtype=dtype)
num_classes = ops.cast(num_classes, "int64")
confusion_matrix = ops.scatter(indices, values, (num_classes, num_classes))
return confusion_matrix
| keras-core/keras_core/metrics/metrics_utils.py/0 | {
"file_path": "keras-core/keras_core/metrics/metrics_utils.py",
"repo_id": "keras-core",
"token_count": 11195
} | 47 |
import copy
import tree
from keras_core.api_export import keras_core_export
from keras_core.backend.common import global_state
from keras_core.layers.core.input_layer import InputLayer
from keras_core.layers.layer import Layer
from keras_core.legacy.saving import saving_utils
from keras_core.legacy.saving import serialization as legacy_serialization
from keras_core.models.functional import Functional
from keras_core.models.model import Model
from keras_core.saving import serialization_lib
@keras_core_export(["keras_core.Sequential", "keras_core.models.Sequential"])
class Sequential(Model):
"""`Sequential` groups a linear stack of layers into a `Model`.
Examples:
```python
model = keras_core.Sequential()
model.add(keras_core.Input(shape=(16,)))
model.add(keras_core.layers.Dense(8))
# Note that you can also omit the initial `Input`.
# In that case the model doesn't have any weights until the first call
# to a training/evaluation method (since it isn't yet built):
model = keras_core.Sequential()
model.add(keras_core.layers.Dense(8))
model.add(keras_core.layers.Dense(4))
# model.weights not created yet
# Whereas if you specify an `Input`, the model gets built
# continuously as you are adding layers:
model = keras_core.Sequential()
model.add(keras_core.Input(shape=(16,)))
model.add(keras_core.layers.Dense(8))
len(model.weights) # Returns "2"
# When using the delayed-build pattern (no input shape specified), you can
# choose to manually build your model by calling
# `build(batch_input_shape)`:
model = keras_core.Sequential()
model.add(keras_core.layers.Dense(8))
model.add(keras_core.layers.Dense(4))
model.build((None, 16))
len(model.weights) # Returns "4"
# Note that when using the delayed-build pattern (no input shape specified),
# the model gets built the first time you call `fit`, `eval`, or `predict`,
# or the first time you call the model on some input data.
model = keras_core.Sequential()
model.add(keras_core.layers.Dense(8))
model.add(keras_core.layers.Dense(1))
model.compile(optimizer='sgd', loss='mse')
# This builds the model for the first time:
model.fit(x, y, batch_size=32, epochs=10)
```
"""
def __init__(self, layers=None, trainable=True, name=None):
super().__init__(trainable=trainable, name=name)
self._functional = None
self._layers = []
if layers:
for layer in layers:
self.add(layer, rebuild=False)
self._maybe_rebuild()
def add(self, layer, rebuild=True):
"""Adds a layer instance on top of the layer stack.
Args:
layer: layer instance.
"""
# Legacy case: if the first layer has an input_shape arg,
# use it to build an InputLayer.
if not self._layers:
if getattr(layer, "_input_shape_arg", None) is not None:
self.add(InputLayer(shape=layer._input_shape_arg))
# If we are passed a Keras tensor created by keras.Input(), we
# extract the input layer from its keras history and use that.
if hasattr(layer, "_keras_history"):
origin_layer = layer._keras_history[0]
if isinstance(origin_layer, InputLayer):
layer = origin_layer
if not isinstance(layer, Layer):
raise ValueError(
"Only instances of `keras_core.Layer` can be "
f"added to a Sequential model. Received: {layer} "
f"(of type {type(layer)})"
)
if not self._is_layer_name_unique(layer):
raise ValueError(
"All layers added to a Sequential model "
f"should have unique names. Name '{layer.name}' is already "
"the name of a layer in this model. Update the `name` argument "
"to pass a unique name."
)
if (
isinstance(layer, InputLayer)
and self._layers
and isinstance(self._layers[0], InputLayer)
):
raise ValueError(
f"Sequential model '{self.name}' has already been configured "
f"to use input shape {self._layers[0].batch_shape}. You cannot "
f"add a different Input layer to it."
)
self._layers.append(layer)
if rebuild:
self._maybe_rebuild()
else:
self.built = False
self._functional = None
def pop(self, rebuild=True):
"""Removes the last layer in the model."""
layer = self._layers.pop()
self.built = False
self._functional = None
if rebuild:
self._maybe_rebuild()
return layer
def _maybe_rebuild(self):
self.built = False
self._functional = None
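# If the model starts with an `Input` layer, the input shape is known, so
# the functional graph can be rebuilt eagerly.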
if isinstance(self._layers[0], InputLayer) and len(self._layers) > 1:
input_shape = self._layers[0].batch_shape
self.build(input_shape)
def _lock_state(self):
# Unlike other layers, Sequential is mutable after build.
pass
def build(self, input_shape=None):
if not isinstance(input_shape, (tuple, list)):
# Do not attempt to build if the model does not have a single
# input tensor.
return
if input_shape and not (
isinstance(input_shape[0], int) or input_shape[0] is None
):
# Do not attempt to build if the model does not have a single
# input tensor.
return
if not self._layers:
raise ValueError(
f"Sequential model {self.name} cannot be built because it has "
"no layers. Call `model.add(layer)`."
)
if isinstance(self._layers[0], InputLayer):
if self._layers[0].batch_shape != input_shape:
raise ValueError(
f"Sequential model '{self.name}' has already been "
"configured to use input shape "
f"{self._layers[0].batch_shape}. You cannot build it "
f"with input_shape {input_shape}"
)
else:
dtype = self._layers[0].compute_dtype
self._layers = [
InputLayer(batch_shape=input_shape, dtype=dtype)
] + self._layers
# Build functional model
inputs = self._layers[0].output
x = inputs
for layer in self._layers[1:]:
try:
x = layer(x)
except NotImplementedError:
# Can happen if shape inference is not implemented.
# TODO: consider reverting inbound nodes on layers processed.
return
outputs = x
self._functional = Functional(inputs=inputs, outputs=outputs)
self.built = True
def call(self, inputs, training=None, mask=None):
if self._functional:
return self._functional.call(inputs, training=training, mask=mask)
# Fallback: Just apply the layer sequence.
# This typically happens if `inputs` is a nested struct.
for layer in self.layers:
# During each iteration, `inputs` are the inputs to `layer`, and
# `outputs` are the outputs of `layer` applied to `inputs`. At the
# end of each iteration `inputs` is set to `outputs` to prepare for
# the next layer.
kwargs = {}
if layer._call_has_mask_arg:
kwargs["mask"] = mask
if layer._call_has_training_arg and training is not None:
kwargs["training"] = training
outputs = layer(inputs, **kwargs)
inputs = outputs
def _get_mask_from_keras_tensor(kt):
return getattr(kt, "_keras_mask", None)
mask = tree.map_structure(_get_mask_from_keras_tensor, outputs)
return outputs
@property
def layers(self):
# Historically, `sequential.layers` only returns layers that were added
# via `add`, and omits the auto-generated `InputLayer` that comes at the
# bottom of the stack.
layers = self._layers
if layers and isinstance(layers[0], InputLayer):
return layers[1:]
return layers[:]
def compute_output_spec(self, inputs, training=None, mask=None):
if self._functional:
return self._functional.compute_output_spec(
inputs, training=training, mask=mask
)
# Direct application
for layer in self.layers:
outputs = layer.compute_output_spec(
inputs, training=training
) # Ignore mask
inputs = outputs
return outputs
@property
def input_shape(self):
if self._functional:
return self._functional.input_shape
raise ValueError(
f"Sequential model '{self.name}' has no defined input shape yet."
)
@property
def output_shape(self):
if self._functional:
return self._functional.output_shape
raise ValueError(
f"Sequential model '{self.name}' has no defined output shape yet."
)
@property
def inputs(self):
if self._functional:
return self._functional.inputs
raise ValueError(
f"Sequential model '{self.name}' has no defined inputs yet."
)
@property
def outputs(self):
if self._functional:
return self._functional.outputs
raise ValueError(
f"Sequential model '{self.name}' has no defined outputs yet."
)
@property
def input_dtype(self):
# Sequential.__call__ will try to convert its inputs
# to the dtype expected by its input layer, if any.
layers = self._layers
if layers and isinstance(layers[0], InputLayer):
return layers[0].dtype
return super().input_dtype
def _is_layer_name_unique(self, layer):
for ref_layer in self._layers:
if layer.name == ref_layer.name and ref_layer is not layer:
return False
return True
def get_config(self):
serialize_fn = serialization_lib.serialize_keras_object
if global_state.get_global_attribute("use_legacy_config", False):
# Legacy format serialization used for H5 and SavedModel formats
serialize_fn = legacy_serialization.serialize_keras_object
layer_configs = []
for layer in super().layers:
# `super().layers` includes the InputLayer if available (it is
# filtered out of `self.layers`).
layer_configs.append(serialize_fn(layer))
config = Model.get_config(self)
config["name"] = self.name
config["layers"] = copy.deepcopy(layer_configs)
if self._functional is not None:
config["build_input_shape"] = self._layers[0].batch_shape
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if "name" in config:
name = config["name"]
build_input_shape = config.get("build_input_shape")
layer_configs = config["layers"]
else:
name = None
layer_configs = config
model = cls(name=name)
for layer_config in layer_configs:
if "module" not in layer_config:
# Legacy format deserialization (no "module" key)
# used for H5 and SavedModel formats
layer = saving_utils.model_from_config(
layer_config,
custom_objects=custom_objects,
)
else:
layer = serialization_lib.deserialize_keras_object(
layer_config,
custom_objects=custom_objects,
)
model.add(layer)
if (
not model._functional
and build_input_shape
and isinstance(build_input_shape, (tuple, list))
):
model.build(build_input_shape)
return model
| keras-core/keras_core/models/sequential.py/0 | {
"file_path": "keras-core/keras_core/models/sequential.py",
"repo_id": "keras-core",
"token_count": 5454
} | 48 |
import numpy as np
from keras_core import Layer
from keras_core import testing
from keras_core.backend import KerasTensor
from keras_core.ops.node import Node
class DummyLayer(Layer):
pass
class NodeTest(testing.TestCase):
# Testing a simple node and layer combination **a**
def test_simple_case(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
a_layer = DummyLayer()
node = Node(a_layer, outputs=a, call_args=(), call_kwargs={})
self.assertEqual(node.is_input, True)
self.assertEqual(node.output_tensors[0], a)
self.assertEqual(node.output_tensors[0].shape, shape)
# Testing a simple node connection with args and kwargs **a** --> **b**
def test_single_wired_layers(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
a_layer = DummyLayer()
node1 = Node(a_layer, outputs=a, call_args=(), call_kwargs={})
b = KerasTensor(shape=shape)
x = KerasTensor(shape=shape)
kwargs = {"x": x}
args = (a,)
b_layer = DummyLayer()
node2 = Node(b_layer, outputs=b, call_args=args, call_kwargs=kwargs)
self.assertEqual(node1.is_input, True)
self.assertEqual(node2.is_input, False)
self.assertEqual(node1.operation, a_layer)
self.assertEqual(node2.operation, b_layer)
self.assertEqual(node1.output_tensors[0], a)
self.assertEqual(node1.output_tensors[0].shape, shape)
self.assertEqual(a_layer._inbound_nodes[0], node1)
self.assertEqual(a_layer._outbound_nodes[0], node2)
self.assertEqual(b_layer._inbound_nodes[0], node2)
self.assertEqual(node2.parent_nodes[0], node1)
self.assertEqual(node2.input_tensors, [a, x])
self.assertEqual(node2.arguments.kwargs, kwargs)
self.assertEqual(node2.arguments.args, args)
# Testing when output tensor is not Keras Tensor
def test_output_tensor_error(self):
a = np.random.rand(2, 3, 4)
a_layer = DummyLayer()
with self.assertRaisesRegex(
ValueError, "operation outputs must be tensors."
):
Node(a_layer, outputs=a, call_args=(), call_kwargs={})
| keras-core/keras_core/ops/node_test.py/0 | {
"file_path": "keras-core/keras_core/ops/node_test.py",
"repo_id": "keras-core",
"token_count": 996
} | 49 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.optimizers import optimizer
@keras_core_export(["keras_core.optimizers.Adam"])
class Adam(optimizer.Optimizer):
"""Optimizer that implements the Adam algorithm.
Adam optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments.
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
the method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
Args:
learning_rate: A float, a
`keras_core.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates. Defaults to
`0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates. Defaults to
`0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults
to `1e-7`.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and beyond". Defaults
to `False`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
name="adam",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.amsgrad = amsgrad
def build(self, var_list):
"""Initialize optimizer variables.
Adam optimizer has 3 types of variables: momentums, velocities and
velocity_hat (only set when amsgrad is applied).
Args:
var_list: list of model variables to build Adam variables on.
"""
if self.built:
return
super().build(var_list)
self._momentums = []
self._velocities = []
for var in var_list:
self._momentums.append(
self.add_variable_from_reference(
reference_variable=var, name="momentum"
)
)
self._velocities.append(
self.add_variable_from_reference(
reference_variable=var, name="velocity"
)
)
if self.amsgrad:
self._velocity_hats = []
for var in var_list:
self._velocity_hats.append(
self.add_variable_from_reference(
reference_variable=var, name="velocity_hat"
)
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
beta_1_power = ops.power(
ops.cast(self.beta_1, variable.dtype), local_step
)
beta_2_power = ops.power(
ops.cast(self.beta_2, variable.dtype), local_step
)
m = self._momentums[self._get_variable_index(variable)]
v = self._velocities[self._get_variable_index(variable)]
alpha = lr * ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)
m.assign(m + (gradient - m) * (1 - self.beta_1))
v.assign(v + (ops.square(gradient) - v) * (1 - self.beta_2))
if self.amsgrad:
v_hat = self._velocity_hats[self._get_variable_index(variable)]
v_hat.assign(ops.maximum(v_hat, v))
v = v_hat
variable.assign(variable - (m * alpha) / (ops.sqrt(v) + self.epsilon))
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
"amsgrad": self.amsgrad,
}
)
return config
Adam.__doc__ = Adam.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| keras-core/keras_core/optimizers/adam.py/0 | {
"file_path": "keras-core/keras_core/optimizers/adam.py",
"repo_id": "keras-core",
"token_count": 2582
} | 50 |
import numpy as np
from keras_core import backend
from keras_core import constraints
from keras_core import optimizers
from keras_core import testing
class OptimizerTest(testing.TestCase):
def test_constraints_are_applied(self):
v = backend.Variable(np.random.random((2, 2)) - 1.0)
v.constraint = constraints.NonNeg()
optimizer = optimizers.SGD(learning_rate=0.0001)
grad = backend.numpy.zeros((2, 2))
optimizer.apply_gradients([(grad, v)])
self.assertAlmostEqual(np.min(v), 0.0)
def test_get_method(self):
obj = optimizers.get("sgd")
self.assertIsInstance(obj, optimizers.SGD)
obj = optimizers.get("adamw")
self.assertIsInstance(obj, optimizers.AdamW)
obj = optimizers.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
optimizers.get("typo")
def test_static_loss_scaling(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]]) * 1024.0
optimizer = optimizers.SGD(learning_rate=1.0, loss_scale_factor=1024.0)
optimizer.apply_gradients([(grads, v)])
self.assertEqual(optimizer.scale_loss(1.0), 1024.0)
self.assertAllClose(v, [[0.0, 0.0], [0.0, 0.0]])
def test_set_weights(self):
x = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
optimizer_1 = optimizers.Adam()
grads = backend.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])
optimizer_1.apply_gradients(zip([grads], [x]))
optimizer_2 = optimizers.Adam()
with self.assertRaisesRegex(ValueError, "You are calling*"):
optimizer_2.set_weights(optimizer_1.variables)
optimizer_2.build([x])
optimizer_2.set_weights(optimizer_1.variables)
for i in range(len(optimizer_1.variables)):
self.assertAllClose(
optimizer_1.variables[i],
optimizer_2.variables[i],
)
| keras-core/keras_core/optimizers/optimizer_test.py/0 | {
"file_path": "keras-core/keras_core/optimizers/optimizer_test.py",
"repo_id": "keras-core",
"token_count": 948
} | 51 |
import numpy as np
import pandas
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_core import backend
from keras_core import testing
from keras_core.trainers.data_adapters import array_data_adapter
class TestArrayDataAdapter(testing.TestCase, parameterized.TestCase):
def make_array(self, array_type, shape, dtype="float32"):
if array_type == "np":
return np.ones(shape, dtype=dtype)
elif array_type == "tf":
return tf.ones(shape, dtype=dtype)
elif array_type == "backend":
if backend.backend() == "jax":
import jax
return jax.numpy.ones(shape, dtype=dtype)
elif backend.backend() == "torch":
import torch
return torch.tensor(np.ones(shape, dtype=dtype))
else:
return tf.ones(shape, dtype=dtype)
elif array_type == "pandas":
return pandas.DataFrame(np.ones(shape, dtype=dtype))
@parameterized.parameters([("np",), ("tf",), ("backend",), ("pandas",)])
def test_basic_flow(self, array_type):
x = self.make_array(array_type, (34, 4))
y = self.make_array(array_type, (34, 2))
adapter = array_data_adapter.ArrayDataAdapter(
x,
y=y,
sample_weight=None,
batch_size=16,
steps=None,
shuffle=False,
)
self.assertEqual(adapter.num_batches, 3)
self.assertEqual(adapter.batch_size, 16)
self.assertEqual(adapter.has_partial_batch, True)
self.assertEqual(adapter.partial_batch_size, 2)
gen = adapter.get_numpy_iterator()
for i, batch in enumerate(gen):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, np.ndarray)
self.assertIsInstance(by, np.ndarray)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.dtype, backend.floatx())
if i < 2:
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
else:
self.assertEqual(bx.shape, (2, 4))
self.assertEqual(by.shape, (2, 2))
ds = adapter.get_tf_dataset()
for i, batch in enumerate(ds):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, tf.Tensor)
self.assertIsInstance(by, tf.Tensor)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.dtype, backend.floatx())
if i < 2:
self.assertEqual(tuple(bx.shape), (16, 4))
self.assertEqual(tuple(by.shape), (16, 2))
else:
self.assertEqual(tuple(bx.shape), (2, 4))
self.assertEqual(tuple(by.shape), (2, 2))
def test_multi_inputs_and_outputs(self):
x1 = np.random.random((34, 1))
x2 = np.random.random((34, 2))
y1 = np.random.random((34, 3))
y2 = np.random.random((34, 4))
sw = np.random.random((34,))
adapter = array_data_adapter.ArrayDataAdapter(
x={"x1": x1, "x2": x2},
y=[y1, y2],
sample_weight=sw,
batch_size=16,
steps=None,
shuffle=False,
)
gen = adapter.get_numpy_iterator()
for i, batch in enumerate(gen):
self.assertEqual(len(batch), 3)
bx, by, bw = batch
self.assertIsInstance(bx, dict)
# NOTE: the y list was converted to a tuple for tf.data
# compatibility.
self.assertIsInstance(by, tuple)
self.assertIsInstance(bw, tuple)
self.assertIsInstance(bx["x1"], np.ndarray)
self.assertIsInstance(bx["x2"], np.ndarray)
self.assertIsInstance(by[0], np.ndarray)
self.assertIsInstance(by[1], np.ndarray)
self.assertIsInstance(bw[0], np.ndarray)
self.assertIsInstance(bw[1], np.ndarray)
self.assertEqual(bx["x1"].dtype, by[0].dtype)
self.assertEqual(bx["x1"].dtype, backend.floatx())
if i < 2:
self.assertEqual(bx["x1"].shape, (16, 1))
self.assertEqual(bx["x2"].shape, (16, 2))
self.assertEqual(by[0].shape, (16, 3))
self.assertEqual(by[1].shape, (16, 4))
self.assertEqual(bw[0].shape, (16,))
self.assertEqual(bw[1].shape, (16,))
else:
self.assertEqual(bx["x1"].shape, (2, 1))
self.assertEqual(by[0].shape, (2, 3))
self.assertEqual(bw[0].shape, (2,))
self.assertEqual(bw[1].shape, (2,))
ds = adapter.get_tf_dataset()
for i, batch in enumerate(ds):
self.assertEqual(len(batch), 3)
bx, by, bw = batch
self.assertIsInstance(bx, dict)
# NOTE: the y list was converted to a tuple for tf.data
# compatibility.
self.assertIsInstance(by, tuple)
self.assertIsInstance(bw, tuple)
self.assertIsInstance(bx["x1"], tf.Tensor)
self.assertIsInstance(bx["x2"], tf.Tensor)
self.assertIsInstance(by[0], tf.Tensor)
self.assertIsInstance(by[1], tf.Tensor)
self.assertIsInstance(bw[0], tf.Tensor)
self.assertIsInstance(bw[1], tf.Tensor)
self.assertEqual(bx["x1"].dtype, by[0].dtype)
self.assertEqual(bx["x1"].dtype, backend.floatx())
if i < 2:
self.assertEqual(tuple(bx["x1"].shape), (16, 1))
self.assertEqual(tuple(bx["x2"].shape), (16, 2))
self.assertEqual(tuple(by[0].shape), (16, 3))
self.assertEqual(tuple(by[1].shape), (16, 4))
self.assertEqual(tuple(bw[0].shape), (16,))
self.assertEqual(tuple(bw[1].shape), (16,))
else:
self.assertEqual(tuple(bx["x1"].shape), (2, 1))
self.assertEqual(tuple(by[0].shape), (2, 3))
self.assertEqual(tuple(bw[0].shape), (2,))
self.assertEqual(tuple(bw[1].shape), (2,))
@parameterized.parameters([("int",), ("categorical",)])
def test_class_weights(self, target_encoding):
x = np.random.random((4, 2))
if target_encoding == "int":
y = np.array([[0], [1], [2], [3]], dtype="int32")
else:
y = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
dtype="float32",
)
class_weight = {
0: 0.1,
1: 0.2,
2: 0.3,
3: 0.4,
}
adapter = array_data_adapter.ArrayDataAdapter(
x,
y=y,
class_weight=class_weight,
batch_size=16,
)
gen = adapter.get_numpy_iterator()
for batch in gen:
self.assertEqual(len(batch), 3)
_, _, bw = batch
self.assertAllClose(bw, [0.1, 0.2, 0.3, 0.4])
def test_errors(self):
# TODO
pass
@parameterized.parameters([("np",), ("tf",), ("backend",), ("pandas",)])
def test_integer_inputs(self, array_type):
x1 = self.make_array(array_type, (4, 4), dtype="float64")
x2 = self.make_array(array_type, (4, 4), dtype="int32")
y = self.make_array(array_type, (4, 2))
adapter = array_data_adapter.ArrayDataAdapter(
(x1, x2),
y=y,
sample_weight=None,
batch_size=4,
steps=None,
shuffle=False,
)
(x1, x2), y = next(adapter.get_numpy_iterator())
self.assertEqual(x1.dtype, backend.floatx())
self.assertEqual(x2.dtype, "int32")
def test_pandas_series(self):
x = pandas.Series(np.ones((10,)))
y = np.ones((10,))
adapter = array_data_adapter.ArrayDataAdapter(
x,
y=y,
sample_weight=None,
batch_size=4,
steps=None,
shuffle=False,
)
self.assertEqual(adapter.num_batches, 3)
self.assertEqual(adapter.batch_size, 4)
self.assertEqual(adapter.has_partial_batch, True)
self.assertEqual(adapter.partial_batch_size, 2)
x, y = next(adapter.get_numpy_iterator())
self.assertEqual(x.dtype, backend.floatx())
self.assertIsInstance(x, np.ndarray)
self.assertEqual(x.shape, (4, 1))
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="Only tensorflow supports raggeds",
)
def test_tf_ragged(self):
x = tf.ragged.constant([[1, 2], [1, 2, 3], [1, 2], [1], []], "float64")
y = np.ones((5,))
adapter = array_data_adapter.ArrayDataAdapter(
x,
y=y,
sample_weight=None,
batch_size=2,
steps=None,
shuffle=False,
)
self.assertEqual(adapter.num_batches, 3)
self.assertEqual(adapter.batch_size, 2)
self.assertEqual(adapter.has_partial_batch, True)
self.assertEqual(adapter.partial_batch_size, 1)
x, y = next(adapter.get_numpy_iterator())
self.assertEqual(x.dtype, backend.floatx())
self.assertIsInstance(x, tf.RaggedTensor)
self.assertEqual(x.shape, (2, None))
| keras-core/keras_core/trainers/data_adapters/array_data_adapter_test.py/0 | {
"file_path": "keras-core/keras_core/trainers/data_adapters/array_data_adapter_test.py",
"repo_id": "keras-core",
"token_count": 5119
} | 52 |
sh_binary(
name = "build_pip_pkg",
srcs = ["build_deps/build_pip_pkg.sh"],
data = [
"LICENSE",
"MANIFEST.in",
"README.md",
"setup.cfg",
"setup.py",
"//keras_cv",
],
)
| keras-cv/BUILD.bazel/0 | {
"file_path": "keras-cv/BUILD.bazel",
"repo_id": "keras-cv",
"token_count": 138
} | 53 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.layers import JitteredResize
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
BOUNDING_BOXES,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
IMAGES,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldJitteredResize(BaseImageAugmentationLayer):
"""JitteredResize implements resize with scale distortion.
JitteredResize takes a three-step approach to size-distortion based image
augmentation. This technique is specifically tuned for object detection
pipelines. The layer takes an input of images and bounding boxes, both of
which may be ragged. It outputs a dense image tensor, ready to feed to a
model for training. As such this layer will commonly be the final step in an
augmentation pipeline.
The augmentation process is as follows:
The image is first scaled according to a randomly sampled scale factor. The
width and height of the image are then resized according to the sampled
scale. This is done to introduce noise into the local scale of features in
the image. A subset of the image is then cropped randomly according to
`crop_size`. This crop is then padded to be `target_size`. Bounding boxes
are translated and scaled according to the random scaling and random
cropping.
Usage:
```python
train_ds = load_object_detection_dataset()
jittered_resize = layers.JitteredResize(
target_size=(640, 640),
scale_factor=(0.8, 1.25),
bounding_box_format="xywh",
)
train_ds = train_ds.map(
jittered_resize, num_parallel_calls=tf.data.AUTOTUNE
)
# images now are (640, 640, 3)
# an example using crop size
train_ds = load_object_detection_dataset()
jittered_resize = layers.JitteredResize(
target_size=(640, 640),
crop_size=(250, 250),
scale_factor=(0.8, 1.25),
bounding_box_format="xywh",
)
train_ds = train_ds.map(
jittered_resize, num_parallel_calls=tf.data.AUTOTUNE
)
# images now are (640, 640, 3), but they were resized from a 250x250 crop.
```
Args:
target_size: A tuple representing the output size of images.
scale_factor: A tuple of two floats or a `keras_cv.FactorSampler`. For
each augmented image a value is sampled from the provided range.
This factor is used to scale the input image.
To replicate the results of the MaskRCNN paper pass `(0.8, 1.25)`.
crop_size: (Optional) the size of the image to crop from the scaled
image, defaults to `target_size` when not provided.
bounding_box_format: The format of bounding boxes of input boxes.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
interpolation: String, the interpolation method, defaults to
`"bilinear"`. Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
`"area"`, `"lanczos3"`, `"lanczos5"`, `"gaussian"`,
`"mitchellcubic"`.
seed: (Optional) integer to use as the random seed.
"""
def __init__(
self,
target_size,
scale_factor,
crop_size=None,
bounding_box_format=None,
interpolation="bilinear",
seed=None,
**kwargs,
):
super().__init__(**kwargs)
if not isinstance(target_size, tuple) or len(target_size) != 2:
raise ValueError(
"JitteredResize() expects `target_size` to be a tuple of two "
f"integers. Received `target_size={target_size}`"
)
crop_size = crop_size or target_size
self.interpolation = preprocessing_utils.get_interpolation(
interpolation
)
self.scale_factor = preprocessing_utils.parse_factor(
scale_factor,
min_value=0.0,
max_value=None,
param_name="scale_factor",
seed=seed,
)
self.crop_size = crop_size
self.target_size = target_size
self.bounding_box_format = bounding_box_format
self.seed = seed
self.force_output_dense_images = True
self.auto_vectorize = False
def get_random_transformation(self, image=None, **kwargs):
original_image_shape = tf.shape(image)
image_shape = tf.cast(original_image_shape[0:2], tf.float32)
scaled_size = tf.round(image_shape * self.scale_factor())
scale = tf.minimum(
scaled_size[0] / image_shape[0], scaled_size[1] / image_shape[1]
)
scaled_size = tf.round(image_shape * scale)
image_scale = scaled_size / image_shape
max_offset = scaled_size - self.crop_size
max_offset = tf.where(
tf.less(max_offset, 0), tf.zeros_like(max_offset), max_offset
)
offset = max_offset * tf.random.uniform([2], minval=0, maxval=1)
offset = tf.cast(offset, tf.int32)
return {
"original_size": original_image_shape,
"image_scale": image_scale,
"scaled_size": scaled_size,
"offset": offset,
}
def compute_image_signature(self, images):
return tf.TensorSpec(
shape=list(self.target_size) + [images.shape[-1]],
dtype=self.compute_dtype,
)
def augment_image(self, image, transformation, **kwargs):
# unpackage augmentation arguments
scaled_size = transformation["scaled_size"]
offset = transformation["offset"]
target_size = self.target_size
crop_size = self.crop_size
scaled_image = tf.image.resize(
image, tf.cast(scaled_size, tf.int32), method=self.interpolation
)
scaled_image = scaled_image[
offset[0] : offset[0] + crop_size[0],
offset[1] : offset[1] + crop_size[1],
:,
]
scaled_image = tf.image.pad_to_bounding_box(
scaled_image, 0, 0, target_size[0], target_size[1]
)
return tf.cast(scaled_image, self.compute_dtype)
def augment_bounding_boxes(self, bounding_boxes, transformation, **kwargs):
if self.bounding_box_format is None:
raise ValueError(
"Please provide a `bounding_box_format` when augmenting "
"bounding boxes with `JitteredResize()`."
)
result = bounding_boxes.copy()
image_scale = tf.cast(transformation["image_scale"], self.compute_dtype)
offset = tf.cast(transformation["offset"], self.compute_dtype)
original_size = transformation["original_size"]
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
image_shape=original_size,
source=self.bounding_box_format,
target="yxyx",
)
# Adjusts box coordinates based on image_scale and offset.
yxyx = bounding_boxes["boxes"]
yxyx *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
yxyx -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
result["boxes"] = yxyx
result = bounding_box.clip_to_image(
result,
image_shape=self.target_size + (3,),
bounding_box_format="yxyx",
)
result = bounding_box.convert_format(
result,
image_shape=self.target_size + (3,),
source="yxyx",
target=self.bounding_box_format,
)
return result
def augment_label(self, label, transformation, **kwargs):
return label
def get_config(self):
config = super().get_config()
config.update(
{
"target_size": self.target_size,
"scale_factor": self.scale_factor,
"crop_size": self.crop_size,
"bounding_box_format": self.bounding_box_format,
"interpolation": self.interpolation,
"seed": self.seed,
}
)
return config
class JitteredResizeTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
target_size = (32, 32)
fixed_scale_factor = (3 / 4, 3 / 4)
image = tf.random.uniform(shape=(1, 64, 64, 3)) * 255.0
layer = JitteredResize(
target_size=target_size,
scale_factor=fixed_scale_factor,
)
old_layer = OldJitteredResize(
target_size=target_size,
scale_factor=fixed_scale_factor,
)
# makes offsets fixed to (0.5, 0.5)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=tf.convert_to_tensor([[0.5, 0.5]]),
):
output = layer(image)
with unittest.mock.patch.object(
tf.random,
"uniform",
return_value=tf.convert_to_tensor([0.5, 0.5]),
):
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
is_inputs_containing_bounding_boxes = True
num_images = [100, 200, 500, 1000]
results = {}
aug_candidates = [JitteredResize, OldJitteredResize]
aug_args = {
"target_size": (30, 30),
"scale_factor": (3 / 4, 4 / 3),
"bounding_box_format": "xyxy",
}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
inputs = {IMAGES: x_train[:n_images]}
if is_inputs_containing_bounding_boxes:
inputs.update(
{
BOUNDING_BOXES: {
"classes": tf.zeros(shape=(n_images, 4)),
"boxes": tf.zeros(shape=(n_images, 4, 4)),
}
}
)
# warmup
layer(inputs)
t0 = time.time()
r1 = layer(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
inputs = {IMAGES: x_train[:n_images]}
if is_inputs_containing_bounding_boxes:
inputs.update(
{
BOUNDING_BOXES: {
"classes": tf.zeros(shape=(n_images, 4)),
"boxes": tf.zeros(shape=(n_images, 4, 4)),
}
}
)
# warmup
apply_aug(inputs)
t0 = time.time()
r1 = apply_aug(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# tf.map_fn while_loop cannot run on XLA
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_jittered_resize.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_jittered_resize.py",
"repo_id": "keras-cv",
"token_count": 5992
} | 54 |
#!/usr/bin/env bash
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Builds a wheel of KerasCV for Pip. Requires Bazel.
# Adapted from https://github.com/tensorflow/addons/blob/master/build_deps/build_pip_pkg.sh
set -e
set -x
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
function is_windows() {
if [[ "${PLATFORM}" =~ (cygwin|mingw32|mingw64|msys)_nt* ]]; then
true
else
false
fi
}
if is_windows; then
PIP_FILE_PREFIX="bazel-bin/build_pip_pkg.exe.runfiles/__main__/"
else
PIP_FILE_PREFIX="bazel-bin/build_pip_pkg.runfiles/__main__/"
fi
function main() {
while [[ ! -z "${1}" ]]; do
if [[ ${1} == "make" ]]; then
echo "Using Makefile to build pip package."
PIP_FILE_PREFIX=""
else
DEST=${1}
fi
shift
done
if [[ -z ${DEST} ]]; then
echo "No destination dir provided"
exit 1
fi
# Create the directory, then do dirname on a non-existent file inside it to
# give us an absolute path with tilde characters resolved to the destination
# directory.
mkdir -p ${DEST}
if [[ ${PLATFORM} == "darwin" ]]; then
DEST=$(pwd -P)/${DEST}
else
DEST=$(readlink -f "${DEST}")
fi
echo "=== destination directory: ${DEST}"
TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX)
echo $(date) : "=== Using tmpdir: ${TMPDIR}"
echo "=== Copy KerasCV Custom op files"
cp ${PIP_FILE_PREFIX}setup.cfg "${TMPDIR}"
cp ${PIP_FILE_PREFIX}setup.py "${TMPDIR}"
cp ${PIP_FILE_PREFIX}MANIFEST.in "${TMPDIR}"
cp ${PIP_FILE_PREFIX}README.md "${TMPDIR}"
cp ${PIP_FILE_PREFIX}LICENSE "${TMPDIR}"
if is_windows; then
from=$(cygpath -w ${PIP_FILE_PREFIX}keras_cv)
to=$(cygpath -w "${TMPDIR}"/keras_cv)
start robocopy //S "${from}" "${to}" //xf *_test.py
sleep 5
else
rsync -avm -L --exclude='*_test.py' ${PIP_FILE_PREFIX}keras_cv "${TMPDIR}"
fi
pushd ${TMPDIR}
echo $(date) : "=== Building wheel"
python setup.py bdist_wheel > /dev/null
cp dist/*.whl "${DEST}"
popd
rm -rf ${TMPDIR}
echo $(date) : "=== Output wheel file is in: ${DEST}"
}
main "$@"
| keras-cv/build_deps/build_pip_pkg.sh/0 | {
"file_path": "keras-cv/build_deps/build_pip_pkg.sh",
"repo_id": "keras-cv",
"token_count": 1026
} | 55 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for preprocessing demos."""
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras import backend
image_size = 512
BATCH_SIZE = 32
AUTOTUNE = tf.data.AUTOTUNE
mean = tf.constant([0.485, 0.456, 0.406])
std = tf.constant([0.229, 0.224, 0.225])
def normalize(input_image, input_mask):
input_image = tf.image.convert_image_dtype(input_image, tf.float32)
input_image = (input_image - mean) / tf.maximum(std, backend.epsilon())
input_image = input_image / 255
input_mask -= 1
return input_image, input_mask
def to_dict(datapoint):
input_image = tf.image.resize(datapoint["image"], (image_size, image_size))
input_mask = tf.image.resize(
datapoint["segmentation_mask"],
(image_size, image_size),
method="bilinear",
)
input_image, input_mask = normalize(input_image, input_mask)
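# The Oxford-IIIT Pet trimap has 3 segmentation classes, so one-hot encode
# the (now zero-based) integer mask with depth 3.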
input_mask = tf.one_hot(
tf.squeeze(tf.cast(input_mask, tf.int32), axis=-1), depth=3
)
return {"images": input_image, "segmentation_masks": input_mask}
def load_oxford_iiit_pet_dataset():
data, ds_info = tfds.load("oxford_iiit_pet:3.*.*", with_info=True)
print("Dataset info: ", ds_info)
dataset = data["train"]
return (
dataset.shuffle(10 * BATCH_SIZE)
.map(to_dict, num_parallel_calls=AUTOTUNE)
.batch(BATCH_SIZE)
)
def display(display_list):
plt.figure(figsize=(6, 6))
title = ["Input Image", "True Mask", "Predicted Mask"]
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i + 1)
plt.title(title[i])
plt.imshow(tf.keras.utils.array_to_img(display_list[i]))
plt.axis("off")
plt.show()
def visualize_dataset(ds):
for samples in ds.take(1):
sample_image, sample_mask = (
samples["images"][0],
samples["segmentation_masks"][0],
)
display([sample_image, sample_mask])
| keras-cv/examples/layers/preprocessing/segmentation/demo_utils.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/segmentation/demo_utils.py",
"repo_id": "keras-cv",
"token_count": 1010
} | 56 |
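For context, a minimal usage sketch of the demo utilities above, assuming the module is importable as `demo_utils` (the import path is an assumption based on the file location):
```python
# Hypothetical import path; adjust to wherever demo_utils.py lives locally.
import demo_utils

# Builds a batched tf.data pipeline over Oxford-IIIT Pet and shows one
# image / segmentation-mask pair with matplotlib.
ds = demo_utils.load_oxford_iiit_pet_dataset()
demo_utils.visualize_dataset(ds)
```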
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Title: Train an Object Detection Model on Pascal VOC 2007 using KerasCV
Author: [lukewood](https://github.com/LukeWood), [tanzhenyu](https://github.com/tanzhenyu)
Date created: 2022/09/27
Last modified: 2023/03/29
Description: Use KerasCV to train a RetinaNet on Pascal VOC 2007.
""" # noqa: E501
import resource
import sys
import tensorflow as tf
import tensorflow_datasets as tfds
import tqdm
from absl import flags
from tensorflow import keras
import keras_cv
from keras_cv.callbacks import PyCOCOCallback
low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
flags.DEFINE_integer(
"epochs",
100,
"Number of epochs to run for.",
)
flags.DEFINE_string(
"weights_name",
"weights_{epoch:02d}.weights.h5",
"Directory which will be used to store weight checkpoints.",
)
flags.DEFINE_string(
"tensorboard_path",
"logs",
"Directory which will be used to store tensorboard logs.",
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
# parameters from RetinaNet [paper](https://arxiv.org/abs/1708.02002)
# Try to detect an available TPU. If none is present, defaults to
# MirroredStrategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
strategy = tf.distribute.TPUStrategy(tpu)
except ValueError:
# MirroredStrategy is best for a single machine with one or multiple GPUs
strategy = tf.distribute.MirroredStrategy()
BATCH_SIZE = 4
GLOBAL_BATCH_SIZE = BATCH_SIZE * strategy.num_replicas_in_sync
BASE_LR = 0.005 * GLOBAL_BATCH_SIZE / 16
print("Number of accelerators: ", strategy.num_replicas_in_sync)
print("Global Batch Size: ", GLOBAL_BATCH_SIZE)
IMG_SIZE = 640
image_size = [IMG_SIZE, IMG_SIZE, 3]
train_ds = tfds.load(
"voc/2007", split="train+validation", with_info=False, shuffle_files=True
)
train_ds = train_ds.concatenate(
tfds.load(
"voc/2012",
split="train+validation",
with_info=False,
shuffle_files=True,
)
)
eval_ds = tfds.load("voc/2007", split="test", with_info=False)
def unpackage_tfds_inputs(inputs, bounding_box_format):
image = inputs["image"]
boxes = keras_cv.bounding_box.convert_format(
inputs["objects"]["bbox"],
images=image,
source="rel_yxyx",
target=bounding_box_format,
)
bounding_boxes = {
"classes": tf.cast(inputs["objects"]["label"], dtype=tf.float32),
"boxes": tf.cast(boxes, dtype=tf.float32),
}
return {
"images": tf.cast(image, tf.float32),
"bounding_boxes": bounding_boxes,
}
train_ds = train_ds.map(
lambda inputs: unpackage_tfds_inputs(inputs, bounding_box_format="xywh"),
num_parallel_calls=tf.data.AUTOTUNE,
)
eval_ds = eval_ds.map(
lambda inputs: unpackage_tfds_inputs(inputs, bounding_box_format="xywh"),
num_parallel_calls=tf.data.AUTOTUNE,
)
augmenter = keras.Sequential(
layers=[
keras_cv.layers.RandomFlip(
mode="horizontal", bounding_box_format="xywh"
),
keras_cv.layers.JitteredResize(
target_size=(640, 640),
scale_factor=(0.8, 1.25),
bounding_box_format="xywh",
),
]
)
rand_augment = keras_cv.layers.RandAugment(
value_range=(0, 255),
augmentations_per_image=2,
magnitude=0.2,
rate=0.5,
magnitude_stddev=0.1,
geometric=False,
)
def apply_rand_augment(inputs):
inputs["images"] = rand_augment(inputs["images"])
return inputs
train_ds = train_ds.map(apply_rand_augment)
train_ds = train_ds.apply(
tf.data.experimental.dense_to_ragged_batch(BATCH_SIZE)
)
train_ds = train_ds.map(augmenter, num_parallel_calls=tf.data.AUTOTUNE)
def pad_fn(inputs):
inputs["bounding_boxes"] = keras_cv.bounding_box.to_dense(
inputs["bounding_boxes"], max_boxes=32
)
return inputs
train_ds = train_ds.shuffle(8 * strategy.num_replicas_in_sync)
train_ds = train_ds.map(pad_fn, num_parallel_calls=tf.data.AUTOTUNE)
train_ds = train_ds.prefetch(tf.data.AUTOTUNE)
eval_resizing = keras_cv.layers.Resizing(
640, 640, pad_to_aspect_ratio=True, bounding_box_format="xywh"
)
eval_ds = eval_ds.map(
eval_resizing,
num_parallel_calls=tf.data.AUTOTUNE,
)
eval_ds = eval_ds.apply(tf.data.experimental.dense_to_ragged_batch(BATCH_SIZE))
eval_ds = eval_ds.map(pad_fn, num_parallel_calls=tf.data.AUTOTUNE)
eval_ds = eval_ds.prefetch(tf.data.AUTOTUNE)
"""
## Model creation
We'll use the KerasCV API to construct a RetinaNet model. In this tutorial we
use a ResNet50 backbone pretrained on ImageNet. In order to perform
fine-tuning, we freeze the backbone before training. When
`include_rescaling=True` is set, inputs to the model are expected to be in the
range `[0, 255]`.
"""
with strategy.scope():
model = keras_cv.models.RetinaNet(
# number of classes to be used in box classification
num_classes=20,
# For more info on supported bounding box formats, visit
# https://keras.io/api/keras_cv/bounding_box/
bounding_box_format="xywh",
backbone=keras_cv.models.ResNet50Backbone.from_preset(
"resnet50_imagenet"
),
)
lr_decay = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[12000 * 16, 16000 * 16],
values=[BASE_LR, 0.1 * BASE_LR, 0.01 * BASE_LR],
)
optimizer = tf.keras.optimizers.SGD(
learning_rate=lr_decay, momentum=0.9, global_clipnorm=10.0
)
model.prediction_decoder = keras_cv.layers.MultiClassNonMaxSuppression(
bounding_box_format="xywh", confidence_threshold=0.5, from_logits=True
)
model.compile(
classification_loss="focal",
box_loss="smoothl1",
optimizer=optimizer,
metrics=[],
)
class EvaluateCOCOMetricsCallback(keras.callbacks.Callback):
def __init__(self, data):
super().__init__()
self.data = data
self.metrics = keras_cv.metrics.BoxCOCOMetrics(
bounding_box_format="xywh", evaluate_freq=1e9
)
def on_epoch_end(self, epoch, logs):
self.metrics.reset_state()
for batch in tqdm.tqdm(self.data):
images, y_true = batch[0], batch[1]
y_pred = self.model.predict(images, verbose=0)
self.metrics.update_state(y_true, y_pred)
metrics = self.metrics.result(force=True)
logs.update(metrics)
return logs
callbacks = [
keras.callbacks.ReduceLROnPlateau(patience=5),
keras.callbacks.EarlyStopping(patience=10),
keras.callbacks.ModelCheckpoint(FLAGS.weights_name, save_weights_only=True),
# Temporarily need PyCOCOCallback to verify
# a 1:1 comparison with the PyMetrics version.
# Currently, results do not match. I have a feeling this is due
# to how we are creating the boxes in `BoxCOCOMetrics`
PyCOCOCallback(eval_ds, bounding_box_format="xywh"),
keras.callbacks.TensorBoard(log_dir=FLAGS.tensorboard_path),
]
history = model.fit(
train_ds,
validation_data=eval_ds,
epochs=FLAGS.epochs,
callbacks=callbacks,
)
| keras-cv/examples/training/object_detection/pascal_voc/retinanet.py/0 | {
"file_path": "keras-cv/examples/training/object_detection/pascal_voc/retinanet.py",
"repo_id": "keras-cv",
"token_count": 3107
} | 57 |
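For reference, a sketch of how a checkpoint written by this script could be restored for inference; the checkpoint filename is an illustrative assumption that mirrors the `weights_name` pattern above:
```python
import keras_cv

# Rebuild the same architecture that was trained above.
model = keras_cv.models.RetinaNet(
    num_classes=20,
    bounding_box_format="xywh",
    backbone=keras_cv.models.ResNet50Backbone.from_preset("resnet50_imagenet"),
)
# Hypothetical checkpoint produced by ModelCheckpoint(FLAGS.weights_name).
model.load_weights("weights_10.weights.h5")
```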
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import config
if config.keras_3():
from keras.src.backend.tensorflow import * # noqa: F403, F401
from keras.src.backend.tensorflow import ( # noqa: F403, F401
convert_to_numpy,
)
from keras.src.backend.tensorflow.core import * # noqa: F403, F401
from keras.src.backend.tensorflow.math import * # noqa: F403, F401
from keras.src.backend.tensorflow.nn import * # noqa: F403, F401
from keras.src.backend.tensorflow.numpy import * # noqa: F403, F401
else:
# isort: off
from keras_core.src.backend.tensorflow import * # noqa: F403, F401
from keras_core.src.backend.tensorflow import ( # noqa: F403, F401
convert_to_numpy,
)
from keras_core.src.backend.tensorflow.core import * # noqa: F403, F401
from keras_core.src.backend.tensorflow.math import * # noqa: F403, F401
from keras_core.src.backend.tensorflow.nn import * # noqa: F403, F401
from keras_core.src.backend.tensorflow.numpy import * # noqa: F403, F401, E501
# Some TF APIs where the numpy API doesn't support raggeds that we need
from tensorflow import broadcast_to # noqa: F403, F401
from tensorflow import concat as concatenate # noqa: F403, F401
from tensorflow import repeat # noqa: F403, F401
from tensorflow import reshape # noqa: F403, F401
from tensorflow import range as arange # noqa: F403, F401
from tensorflow import reduce_all as all # noqa: F403, F401
from tensorflow import reduce_max as max # noqa: F403, F401
from tensorflow import split # noqa: F403, F401
import numpy as np
import tensorflow as tf
def smart_resize(x, size, interpolation="bilinear"):
"""Resize images to a target size without aspect ratio distortion.
Copied from `tf_keras` for Keras 3 and for use in `tf.data` pipeline.
"""
if len(size) != 2:
raise ValueError(
f"Expected `size` to be a tuple of 2 integers, but got: {size}."
)
img = tf.convert_to_tensor(x)
if img.shape.rank is not None:
if img.shape.rank < 3 or img.shape.rank > 4:
raise ValueError(
"Expected an image array with shape `(height, width, "
"channels)`, or `(batch_size, height, width, channels)`, but "
f"got input with incorrect rank, of shape {img.shape}."
)
shape = tf.shape(img)
height, width = shape[-3], shape[-2]
target_height, target_width = size
if img.shape.rank is not None:
static_num_channels = img.shape[-1]
else:
static_num_channels = None
crop_height = tf.cast(
tf.cast(width * target_height, "float32") / target_width, "int32"
)
crop_width = tf.cast(
tf.cast(height * target_width, "float32") / target_height, "int32"
)
# Set back to input height / width if crop_height / crop_width is not
# smaller.
crop_height = tf.minimum(height, crop_height)
crop_width = tf.minimum(width, crop_width)
crop_box_hstart = tf.cast(
tf.cast(height - crop_height, "float32") / 2, "int32"
)
crop_box_wstart = tf.cast(
tf.cast(width - crop_width, "float32") / 2, "int32"
)
if img.shape.rank == 4:
crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0])
crop_box_size = tf.stack([-1, crop_height, crop_width, -1])
else:
crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0])
crop_box_size = tf.stack([crop_height, crop_width, -1])
img = tf.slice(img, crop_box_start, crop_box_size)
img = tf.image.resize(images=img, size=size, method=interpolation)
# Apparent bug in resize_images_v2 may cause shape to be lost
if img.shape.rank is not None:
if img.shape.rank == 4:
img.set_shape((None, None, None, static_num_channels))
if img.shape.rank == 3:
img.set_shape((None, None, static_num_channels))
if isinstance(x, np.ndarray):
return img.numpy()
return img
| keras-cv/keras_cv/backend/tf_ops.py/0 | {
"file_path": "keras-cv/keras_cv/backend/tf_ops.py",
"repo_id": "keras-cv",
"token_count": 1832
} | 58 |
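A short sketch of `smart_resize` on a single unbatched image; the input shape is arbitrary and only for illustration:
```python
import numpy as np

from keras_cv.backend import tf_ops

# A 300x500 RGB image resized to 224x224 without aspect-ratio distortion:
# the image is center-cropped to the target aspect ratio, then resized.
image = np.random.uniform(0, 255, size=(300, 500, 3)).astype("float32")
resized = tf_ops.smart_resize(image, size=(224, 224), interpolation="bilinear")
print(resized.shape)  # (224, 224, 3)
```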
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with bounding boxes."""
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.bounding_box.formats import XYWH
@keras_cv_export("keras_cv.bounding_box.is_relative")
def is_relative(bounding_box_format):
"""A util to check if a bounding box format uses relative coordinates"""
if (
bounding_box_format.lower()
not in bounding_box.converters.TO_XYXY_CONVERTERS
):
raise ValueError(
"`is_relative()` received an unsupported format for the argument "
f"`bounding_box_format`. `bounding_box_format` should be one of "
f"{bounding_box.converters.TO_XYXY_CONVERTERS.keys()}. "
f"Got bounding_box_format={bounding_box_format}"
)
return bounding_box_format.startswith("rel")
@keras_cv_export("keras_cv.bounding_box.as_relative")
def as_relative(bounding_box_format):
"""A util to get the relative equivalent of a provided bounding box format.
If the specified format is already a relative format,
it will be returned unchanged.
"""
if not is_relative(bounding_box_format):
return "rel_" + bounding_box_format
return bounding_box_format
def _relative_area(boxes, bounding_box_format):
boxes = bounding_box.convert_format(
boxes,
source=bounding_box_format,
target="rel_xywh",
)
widths = boxes[..., XYWH.WIDTH]
heights = boxes[..., XYWH.HEIGHT]
# handle corner case where shear performs a full inversion.
return ops.where(
ops.logical_and(widths > 0, heights > 0), widths * heights, 0.0
)
@keras_cv_export("keras_cv.bounding_box.clip_to_image")
def clip_to_image(
bounding_boxes, bounding_box_format, images=None, image_shape=None
):
"""clips bounding boxes to image boundaries.
`clip_to_image()` clips bounding boxes that have coordinates out of bounds
of an image down to the boundaries of the image. This is done by converting
the bounding boxes to a relative format, then clipping them to the `[0, 1]`
range. Additionally, bounding boxes that end up with a zero area have their
class ID set to -1, indicating that there is no object present in them.
Args:
bounding_boxes: bounding box tensor to clip.
bounding_box_format: the KerasCV bounding box format the bounding boxes
are in.
images: list of images to clip the bounding boxes to.
image_shape: the shape of the images to clip the bounding boxes to.
"""
boxes, classes = bounding_boxes["boxes"], bounding_boxes["classes"]
boxes = bounding_box.convert_format(
boxes,
source=bounding_box_format,
target="rel_xyxy",
images=images,
image_shape=image_shape,
)
boxes, classes, images, squeeze = _format_inputs(boxes, classes, images)
x1, y1, x2, y2 = ops.split(boxes, 4, axis=-1)
clipped_bounding_boxes = ops.concatenate(
[
ops.clip(x1, 0, 1),
ops.clip(y1, 0, 1),
ops.clip(x2, 0, 1),
ops.clip(y2, 0, 1),
],
axis=-1,
)
areas = _relative_area(
clipped_bounding_boxes, bounding_box_format="rel_xyxy"
)
clipped_bounding_boxes = bounding_box.convert_format(
clipped_bounding_boxes,
source="rel_xyxy",
target=bounding_box_format,
images=images,
image_shape=image_shape,
)
clipped_bounding_boxes = ops.where(
ops.expand_dims(areas > 0.0, axis=-1), clipped_bounding_boxes, -1.0
)
classes = ops.where(areas > 0.0, classes, -1)
nan_indices = ops.any(ops.isnan(clipped_bounding_boxes), axis=-1)
classes = ops.where(nan_indices, -1, classes)
# TODO update dict and return
clipped_bounding_boxes, classes = _format_outputs(
clipped_bounding_boxes, classes, squeeze
)
result = bounding_boxes.copy()
result["boxes"] = clipped_bounding_boxes
result["classes"] = classes
return result
# TODO (tanzhenyu): merge with clip_to_image
def _clip_boxes(boxes, box_format, image_shape):
"""Clip boxes to the boundaries of the image shape"""
if boxes.shape[-1] != 4:
raise ValueError(
"boxes.shape[-1] is {:d}, but must be 4.".format(boxes.shape[-1])
)
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width, _ = image_shape
max_length = [height, width, height, width]
else:
image_shape = ops.cast(image_shape, dtype=boxes.dtype)
height = image_shape[0]
width = image_shape[1]
max_length = ops.stack([height, width, height, width], axis=-1)
clipped_boxes = ops.maximum(ops.minimum(boxes, max_length), 0.0)
return clipped_boxes
def _format_inputs(boxes, classes, images):
boxes_rank = len(boxes.shape)
if boxes_rank > 3:
raise ValueError(
"Expected len(boxes.shape)=2, or len(boxes.shape)=3, got "
f"len(boxes.shape)={boxes_rank}"
)
boxes_includes_batch = boxes_rank == 3
# Determine if images needs an expand_dims() call
if images is not None:
images_rank = len(images.shape)
if images_rank > 4:
raise ValueError(
"Expected len(images.shape)=2, or len(images.shape)=3, got "
f"len(images.shape)={images_rank}"
)
images_include_batch = images_rank == 4
if boxes_includes_batch != images_include_batch:
raise ValueError(
"clip_to_image() expects both boxes and images to be batched, "
"or both boxes and images to be unbatched. Received "
f"len(boxes.shape)={boxes_rank}, "
f"len(images.shape)={images_rank}. Expected either "
"len(boxes.shape)=2 AND len(images.shape)=3, or "
"len(boxes.shape)=3 AND len(images.shape)=4."
)
if not images_include_batch:
images = ops.expand_dims(images, axis=0)
if not boxes_includes_batch:
return (
ops.expand_dims(boxes, axis=0),
ops.expand_dims(classes, axis=0),
images,
True,
)
return boxes, classes, images, False
def _format_outputs(boxes, classes, squeeze):
if squeeze:
return ops.squeeze(boxes, axis=0), ops.squeeze(classes, axis=0)
return boxes, classes
| keras-cv/keras_cv/bounding_box/utils.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/utils.py",
"repo_id": "keras-cv",
"token_count": 2894
} | 59 |
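A sketch of `clip_to_image` with illustrative values, passing an explicit `image_shape` instead of an image tensor:
```python
import numpy as np

import keras_cv

# Two boxes in "xyxy" format; both partially fall outside a 100x100 image.
bounding_boxes = {
    "boxes": np.array(
        [[[-10.0, -10.0, 50.0, 50.0], [90.0, 90.0, 150.0, 150.0]]]
    ),
    "classes": np.array([[0.0, 1.0]]),
}
clipped = keras_cv.bounding_box.clip_to_image(
    bounding_boxes, bounding_box_format="xyxy", image_shape=(100, 100, 3)
)
# Coordinates now lie inside [0, 100]; boxes clipped down to zero area would
# have their class set to -1.
print(clipped["boxes"])
```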
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.core.factor_sampler.factor_sampler import FactorSampler
@keras_cv_export("keras_cv.core.NormalFactorSampler")
class NormalFactorSampler(FactorSampler):
"""NormalFactorSampler samples factors from a normal distribution.
This is useful when a user wants augmentation strengths to vary around a
central value rather than being fixed or uniformly sampled.
Args:
mean: mean value for the distribution.
stddev: standard deviation of the distribution.
min_value: values below min_value are clipped to min_value.
max_value: values above max_value are clipped to max_value.
Usage:
```python
factor = keras_cv.core.NormalFactorSampler(
mean=0.5,
stddev=0.1,
min_value=0,
max_value=1,
)
random_sharpness = keras_cv.layers.RandomSharpness(
factor=factor, value_range=(0, 255)
)
# random_sharpness will now sample factors from a normal distribution
# centered at 0.5, clipped to the range [0, 1].
```
"""
def __init__(self, mean, stddev, min_value, max_value, seed=None):
self.mean = mean
self.stddev = stddev
self.min_value = min_value
self.max_value = max_value
self.seed = seed
def __call__(self, shape=(), dtype="float32"):
return tf.clip_by_value(
tf.random.normal(
shape=shape,
mean=self.mean,
stddev=self.stddev,
seed=self.seed,
dtype=dtype,
),
self.min_value,
self.max_value,
)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"min_value": self.min_value,
"max_value": self.max_value,
"seed": self.seed,
}
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/core/factor_sampler/normal_factor_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/core/factor_sampler/normal_factor_sampler.py",
"repo_id": "keras-cv",
"token_count": 1032
} | 60 |
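A small sketch showing the sampler being called directly; the values are illustrative:
```python
import keras_cv

factor = keras_cv.core.NormalFactorSampler(
    mean=0.5, stddev=0.1, min_value=0.0, max_value=1.0
)
# Draw eight factors; each is sampled from N(0.5, 0.1) and then clipped
# to the [0.0, 1.0] range.
samples = factor(shape=(8,))
```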
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Mapping
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.backend import assert_tf_keras
from keras_cv.bounding_box import iou
from keras_cv.layers.object_detection import box_matcher
from keras_cv.layers.object_detection import sampling
from keras_cv.utils import target_gather
@keras.utils.register_keras_serializable(package="keras_cv")
class _RpnLabelEncoder(keras.layers.Layer):
"""Transforms the raw labels into training targets for region proposal
network (RPN).
# TODO(tanzhenyu): consider unifying with _ROISampler.
This is different from _ROISampler for several reasons:
1) This deals with unbatched input, dict of anchors and potentially ragged
labels.
2) This deals with ground truth boxes, while _ROISampler deals with padded
ground truth boxes with value -1 and padded ground truth classes with
value -1.
3) this returns positive class targets as 1, while _ROISampler returns
positive class targets as-is. (All negative class targets are 0)
The final classification loss will use one hot and #num_fg_classes + 1
4) this returns #num_anchors dense targets, while _ROISampler returns
#num_sampled_rois dense targets.
5) this returns all positive box targets, while _ROISampler samples the
positive box targets; in both cases, negative box targets are ignored in
the regression loss.
Args:
anchor_format: The format of bounding boxes for anchors to generate. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/) for more details on supported bounding box
formats.
ground_truth_box_format: The format of bounding boxes for ground truth
boxes to generate.
positive_threshold: the float threshold to set an anchor to positive match
to gt box. Values above it are positive matches.
negative_threshold: the float threshold to set an anchor to negative match
to gt box. Values below it are negative matches.
samples_per_image: for each image, the number of positive and negative
samples to generate.
positive_fraction: the fraction of positive samples to the total samples.
""" # noqa: E501
def __init__(
self,
anchor_format,
ground_truth_box_format,
positive_threshold,
negative_threshold,
samples_per_image,
positive_fraction,
box_variance=[0.1, 0.1, 0.2, 0.2],
**kwargs,
):
assert_tf_keras("keras_cv.layers._RpnLabelEncoder")
super().__init__(**kwargs)
self.anchor_format = anchor_format
self.ground_truth_box_format = ground_truth_box_format
self.positive_threshold = positive_threshold
self.negative_threshold = negative_threshold
self.samples_per_image = samples_per_image
self.positive_fraction = positive_fraction
self.box_matcher = box_matcher.BoxMatcher(
thresholds=[negative_threshold, positive_threshold],
match_values=[-1, -2, 1],
force_match_for_each_col=False,
)
self.box_variance = box_variance
self.built = True
self._positives = keras.metrics.Mean(name="percent_boxes_matched")
def call(
self,
anchors_dict: Mapping[str, tf.Tensor],
gt_boxes: tf.Tensor,
gt_classes: tf.Tensor,
):
"""
Args:
anchors_dict: dict of [num_anchors, 4] or [batch_size, num_anchors, 4]
float Tensor for each level.
gt_boxes: [num_gt, 4] or [batch_size, num_gt, 4] float Tensor.
gt_classes: [num_gt, 1] float or integer Tensor.
Returns:
box_targets: dict of [num_anchors, 4] or [batch_size, num_anchors, 4] for each level.
box_weights: dict of [num_anchors, 1] for each level.
class_targets: dict of [num_anchors, 1] for each level.
class_weights: dict of [num_anchors, 1] for each level.
"""
pack = False
anchors = anchors_dict
if isinstance(anchors, dict):
pack = True
anchors = tf.concat(tf.nest.flatten(anchors), axis=0)
anchors = bounding_box.convert_format(
anchors, source=self.anchor_format, target="yxyx"
)
gt_boxes = bounding_box.convert_format(
gt_boxes, source=self.ground_truth_box_format, target="yxyx"
)
# [num_anchors, num_gt] or [batch_size, num_anchors, num_gt]
similarity_mat = iou.compute_iou(
anchors, gt_boxes, bounding_box_format="yxyx"
)
# [num_anchors] or [batch_size, num_anchors]
matched_gt_indices, matched_vals = self.box_matcher(similarity_mat)
# [num_anchors] or [batch_size, num_anchors]
positive_matches = tf.math.equal(matched_vals, 1)
# currently SyncOnReadVariable does not support `assign_add` in
# cross-replica.
# self._positives.update_state(
# tf.reduce_sum(tf.cast(positive_matches, tf.float32), axis=-1)
# )
negative_matches = tf.math.equal(matched_vals, -1)
# [num_anchors, 4] or [batch_size, num_anchors, 4]
matched_gt_boxes = target_gather._target_gather(
gt_boxes, matched_gt_indices
)
# [num_anchors, 4] or [batch_size, num_anchors, 4], used as `y_true` for
# regression loss
encoded_box_targets = bounding_box._encode_box_to_deltas(
anchors,
matched_gt_boxes,
anchor_format="yxyx",
box_format="yxyx",
variance=self.box_variance,
)
# [num_anchors, 1] or [batch_size, num_anchors, 1]
box_sample_weights = tf.cast(
positive_matches[..., tf.newaxis], gt_boxes.dtype
)
# [num_anchors, 1] or [batch_size, num_anchors, 1]
positive_mask = tf.expand_dims(positive_matches, axis=-1)
# set all negative and ignored matches to 0, and all positive matches to
# 1 [num_anchors, 1] or [batch_size, num_anchors, 1]
positive_classes = tf.ones_like(positive_mask, dtype=gt_classes.dtype)
negative_classes = tf.zeros_like(positive_mask, dtype=gt_classes.dtype)
# [num_anchors, 1] or [batch_size, num_anchors, 1]
class_targets = tf.where(
positive_mask, positive_classes, negative_classes
)
# [num_anchors] or [batch_size, num_anchors]
sampled_indicators = sampling.balanced_sample(
positive_matches,
negative_matches,
self.samples_per_image,
self.positive_fraction,
)
# [num_anchors, 1] or [batch_size, num_anchors, 1]
class_sample_weights = tf.cast(
sampled_indicators[..., tf.newaxis], gt_classes.dtype
)
if pack:
encoded_box_targets = self.unpack_targets(
encoded_box_targets, anchors_dict
)
box_sample_weights = self.unpack_targets(
box_sample_weights, anchors_dict
)
class_targets = self.unpack_targets(class_targets, anchors_dict)
class_sample_weights = self.unpack_targets(
class_sample_weights, anchors_dict
)
return (
encoded_box_targets,
box_sample_weights,
class_targets,
class_sample_weights,
)
def unpack_targets(self, targets, anchors_dict):
target_shape = len(targets.get_shape().as_list())
if target_shape != 2 and target_shape != 3:
raise ValueError(
"unpacking targets must be rank 2 or rank 3, got "
f"{target_shape}"
)
unpacked_targets = {}
count = 0
for level, anchors in anchors_dict.items():
num_anchors_lvl = anchors.get_shape().as_list()[0]
if target_shape == 2:
unpacked_targets[level] = targets[
count : count + num_anchors_lvl, ...
]
else:
unpacked_targets[level] = targets[
:, count : count + num_anchors_lvl, ...
]
count += num_anchors_lvl
return unpacked_targets
def get_config(self):
config = {
"anchor_format": self.anchor_format,
"ground_truth_box_format": self.ground_truth_box_format,
"positive_threshold": self.positive_threshold,
"negative_threshold": self.negative_threshold,
"samples_per_image": self.samples_per_image,
"positive_fraction": self.positive_fraction,
"box_variance": self.box_variance,
}
return config
| keras-cv/keras_cv/layers/object_detection/rpn_label_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/rpn_label_encoder.py",
"repo_id": "keras-cv",
"token_count": 4127
} | 61 |
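A construction sketch for the (internal) encoder above; the thresholds and sampling settings are illustrative, the import path follows the file location, and note the layer asserts a TF-Keras backend:
```python
from keras_cv.layers.object_detection.rpn_label_encoder import (
    _RpnLabelEncoder,
)

encoder = _RpnLabelEncoder(
    anchor_format="yxyx",
    ground_truth_box_format="yxyx",
    positive_threshold=0.7,
    negative_threshold=0.3,
    samples_per_image=256,
    positive_fraction=0.5,
)
# encoder(anchors_dict, gt_boxes, gt_classes) returns
# (box_targets, box_weights, class_targets, class_weights), keyed per
# feature-pyramid level when `anchors_dict` is a dict.
```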
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class AugMixTest(TestCase):
def test_return_shapes(self):
layer = preprocessing.AugMix([0, 255])
# RGB
xs = tf.ones((2, 512, 512, 3))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 3))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
# greyscale
xs = tf.ones((2, 512, 512, 1))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 1))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 1))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 1))
def test_in_single_image_and_mask(self):
layer = preprocessing.AugMix([0, 255])
# RGB
xs = tf.cast(
tf.ones((512, 512, 3)),
dtype=tf.float32,
)
xs = layer(xs)
ys_segmentation_masks = tf.cast(
tf.ones((512, 512, 3)),
dtype=tf.float32,
)
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (512, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (512, 512, 3))
# greyscale
xs = tf.cast(
tf.ones((512, 512, 1)),
dtype=tf.float32,
)
xs = layer(xs)
ys_segmentation_masks = tf.cast(
tf.ones((512, 512, 1)),
dtype=tf.float32,
)
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (512, 512, 1))
self.assertEqual(ys_segmentation_masks.shape, (512, 512, 1))
def test_non_square_images_and_masks(self):
layer = preprocessing.AugMix([0, 255])
# RGB
xs = tf.ones((2, 256, 512, 3))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 256, 512, 3))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 256, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (2, 256, 512, 3))
# greyscale
xs = tf.ones((2, 256, 512, 1))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 256, 512, 1))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 256, 512, 1))
self.assertEqual(ys_segmentation_masks.shape, (2, 256, 512, 1))
def test_single_input_args(self):
layer = preprocessing.AugMix([0, 255])
# RGB
xs = tf.ones((2, 512, 512, 3))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 3))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
# greyscale
xs = tf.ones((2, 512, 512, 1))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 1))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 1))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 1))
def test_many_augmentations(self):
layer = preprocessing.AugMix([0, 255], chain_depth=[25, 26])
# RGB
xs = tf.ones((2, 512, 512, 3))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 3))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
# greyscale
xs = tf.ones((2, 512, 512, 1))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 1))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 1))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 1))
| keras-cv/keras_cv/layers/preprocessing/aug_mix_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/aug_mix_test.py",
"repo_id": "keras-cv",
"token_count": 2242
} | 62 |
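The layer under test can also be used directly; a sketch mirroring the shapes exercised above:
```python
import tensorflow as tf

from keras_cv.layers import preprocessing

layer = preprocessing.AugMix([0, 255])
images = tf.random.uniform((2, 224, 224, 3), maxval=255)
augmented = layer(images)  # same shape and value range as the input batch
```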
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
import keras_cv
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.grid_mask import GridMask
from keras_cv.tests.test_case import TestCase
class GridMaskTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
layer = GridMask(ratio_factor=0.1, rotation_factor=(-0.2, 0.3))
xs = layer(xs, training=True)
self.assertEqual(xs.shape, (2, 512, 512, 3))
def test_gridmask_call_results_one_channel(self):
xs = tf.cast(
tf.stack(
[3 * tf.ones((40, 40, 1)), 2 * tf.ones((40, 40, 1))],
axis=0,
),
dtype=tf.float32,
)
fill_value = 0.0
layer = GridMask(
ratio_factor=0.3,
rotation_factor=(0.2, 0.3),
fill_mode="constant",
fill_value=fill_value,
)
xs = layer(xs, training=True)
# Some pixels should be replaced with fill_value
self.assertTrue(
np.any(ops.convert_to_numpy(xs[0]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 3.0))
self.assertTrue(
np.any(ops.convert_to_numpy(xs[1]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
def test_non_square_image(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((1024, 512, 1)), tf.ones((1024, 512, 1))],
axis=0,
),
dtype=tf.float32,
)
fill_value = 100.0
layer = GridMask(
ratio_factor=0.6,
rotation_factor=0.3,
fill_mode="constant",
fill_value=fill_value,
)
xs = layer(xs, training=True)
# Some pixels should be replaced with fill_value
self.assertTrue(
np.any(ops.convert_to_numpy(xs[0]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(
np.any(ops.convert_to_numpy(xs[1]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 1)), tf.ones((100, 100, 1))], axis=0
),
dtype=tf.float32,
)
fill_value = 255.0
layer = GridMask(
ratio_factor=keras_cv.ConstantFactorSampler(0.5),
rotation_factor=0.5,
fill_mode="constant",
fill_value=fill_value,
)
@tf.function
def augment(x):
return layer(x, training=True)
xs = augment(xs)
# Some pixels should be replaced with fill_value
self.assertTrue(
np.any(ops.convert_to_numpy(xs[0]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(
np.any(ops.convert_to_numpy(xs[1]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
def test_in_single_image(self):
xs = tf.cast(
tf.ones((512, 512, 1)),
dtype=tf.float32,
)
layer = GridMask(
ratio_factor=(0.5, 0.5), fill_mode="constant", fill_value=0.0
)
xs = layer(xs, training=True)
self.assertTrue(np.any(ops.convert_to_numpy(xs) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs) == 1.0))
| keras-cv/keras_cv/layers/preprocessing/grid_mask_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/grid_mask_test.py",
"repo_id": "keras-cv",
"token_count": 2110
} | 63 |
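A direct usage sketch for `GridMask`, mirroring the constant-fill configuration used in the tests:
```python
import tensorflow as tf

from keras_cv.layers.preprocessing.grid_mask import GridMask

layer = GridMask(
    ratio_factor=0.5,
    rotation_factor=0.3,
    fill_mode="constant",
    fill_value=0.0,
)
images = tf.random.uniform((2, 224, 224, 3), maxval=255)
masked = layer(images, training=True)  # grid cells are replaced by fill_value
```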
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers import preprocessing
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
@keras_cv_export("keras_cv.layers.RandomAugmentationPipeline")
class RandomAugmentationPipeline(BaseImageAugmentationLayer):
"""RandomAugmentationPipeline constructs a pipeline based on provided
arguments.
The implemented policy does the following: for each input provided in
`call()`, the policy first samples a random number; if the number is less
than `rate`, the policy selects a random layer from the provided list of
`layers` and calls it on the inputs. This is done
`augmentations_per_image` times.
This layer can be used to create custom policies resembling `RandAugment` or
`AutoAugment`.
Usage:
```python
# construct a list of layers
layers = keras_cv.layers.RandAugment.get_standard_policy(
value_range=(0, 255), magnitude=0.75, magnitude_stddev=0.3
)
layers = layers[:4] # slice out some layers you don't want for whatever
reason
layers = layers + [keras_cv.layers.GridMask()]
# create the pipeline.
pipeline = keras_cv.layers.RandomAugmentationPipeline(
layers=layers, augmentations_per_image=3
)
augmented_images = pipeline(images)
```
Args:
layers: a list of `keras.Layers`. These are randomly selected during
augmentation to augment the inputs passed to `call()`. The layers
passed should subclass `BaseImageAugmentationLayer`. Passing
`layers=[]` results in a no-op.
augmentations_per_image: the number of layers to apply to each inputs in
the `call()` method.
rate: the rate at which to apply each augmentation. This is applied on a
per-augmentation basis, so if `augmentations_per_image=3` and
`rate=0.5`, the probability that an image receives no augmentations is
0.5^3, or 0.5*0.5*0.5.
auto_vectorize: whether to use `tf.vectorized_map` or `tf.map_fn` to
apply the augmentations. This offers a significant performance
boost, but can only be used if all the layers provided to the
`layers` argument support auto vectorization.
seed: Integer. Used to create a random seed.
"""
def __init__(
self,
layers,
augmentations_per_image,
rate=1.0,
auto_vectorize=False,
seed=None,
**kwargs,
):
super().__init__(**kwargs, seed=seed)
self.augmentations_per_image = augmentations_per_image
self.rate = rate
self.layers = list(layers)
self.auto_vectorize = auto_vectorize
self.seed = seed
self._random_choice = preprocessing.RandomChoice(
layers=layers, auto_vectorize=auto_vectorize, seed=seed
)
def _augment(self, inputs):
if self.layers == []:
return inputs
result = inputs
for _ in range(self.augmentations_per_image):
skip_augment = self._random_generator.uniform(
shape=(), minval=0.0, maxval=1.0, dtype=tf.float32
)
result = tf.cond(
skip_augment > self.rate,
lambda: result,
lambda: self._random_choice(result),
)
return result
def get_config(self):
config = super().get_config()
config.update(
{
"augmentations_per_image": self.augmentations_per_image,
"auto_vectorize": self.auto_vectorize,
"rate": self.rate,
"layers": self.layers,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
layers = config.pop("layers", None)
if layers:
if isinstance(layers[0], dict):
layers = keras.utils.deserialize_keras_object(layers)
config["layers"] = layers
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_augmentation_pipeline.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_augmentation_pipeline.py",
"repo_id": "keras-cv",
"token_count": 1946
} | 64 |
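A small end-to-end sketch of the pipeline with two concrete layers; the layer choices and factor ranges are illustrative:
```python
import tensorflow as tf

import keras_cv

pipeline = keras_cv.layers.RandomAugmentationPipeline(
    layers=[
        keras_cv.layers.GridMask(),
        keras_cv.layers.RandomSharpness(factor=(0.0, 0.5), value_range=(0, 255)),
    ],
    augmentations_per_image=2,
    rate=0.5,
)
images = tf.random.uniform((4, 224, 224, 3), maxval=255)
augmented = pipeline(images)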
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import bounding_box
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomCropAndResizeTest(TestCase):
height, width = 300, 300
batch_size = 4
target_size = (224, 224)
seed = 42
def test_train_augments_image(self):
# Checks if original and augmented images are different
input_image_shape = (self.batch_size, self.height, self.width, 3)
image = tf.random.uniform(shape=input_image_shape, seed=self.seed)
layer = preprocessing.RandomCropAndResize(
target_size=self.target_size,
aspect_ratio_factor=(3 / 4, 4 / 3),
crop_area_factor=(0.8, 1.0),
seed=self.seed,
)
output = layer(image, training=True)
input_image_resized = tf.image.resize(image, self.target_size)
self.assertNotAllClose(output, input_image_resized)
def test_grayscale(self):
input_image_shape = (self.batch_size, self.height, self.width, 1)
image = tf.random.uniform(shape=input_image_shape)
layer = preprocessing.RandomCropAndResize(
target_size=self.target_size,
aspect_ratio_factor=(3 / 4, 4 / 3),
crop_area_factor=(0.8, 1.0),
)
output = layer(image, training=True)
input_image_resized = tf.image.resize(image, self.target_size)
self.assertAllEqual(output.shape, (4, 224, 224, 1))
self.assertNotAllClose(output, input_image_resized)
@parameterized.named_parameters(
("Not tuple or list", dict()),
("Length not equal to 2", [1, 2, 3]),
("Members not int", (2.3, 4.5)),
("Single integer", 5),
)
def test_target_size_errors(self, target_size):
with self.assertRaisesRegex(
ValueError,
"`target_size` must be tuple of two integers. "
"Received target_size=(.*)",
):
_ = preprocessing.RandomCropAndResize(
target_size=target_size,
aspect_ratio_factor=(3 / 4, 4 / 3),
crop_area_factor=(0.8, 1.0),
)
@parameterized.named_parameters(
("Not tuple or list", dict()),
("Single integer", 5),
("Single float", 5.0),
)
def test_aspect_ratio_factor_errors(self, aspect_ratio_factor):
with self.assertRaisesRegex(
ValueError,
"`aspect_ratio_factor` must be tuple of two positive floats or "
"keras_cv.core.FactorSampler instance. "
"Received aspect_ratio_factor=(.*)",
):
_ = preprocessing.RandomCropAndResize(
target_size=(224, 224),
aspect_ratio_factor=aspect_ratio_factor,
crop_area_factor=(0.8, 1.0),
)
@parameterized.named_parameters(
("Not tuple or list", dict()),
("Single integer", 5),
("Single float", 5.0),
)
def test_crop_area_factor_errors(self, crop_area_factor):
with self.assertRaisesRegex(
ValueError,
"`crop_area_factor` must be tuple of two positive floats less than "
"or equal to 1 or keras_cv.core.FactorSampler instance. "
"Received crop_area_factor=(.*)",
):
_ = preprocessing.RandomCropAndResize(
target_size=(224, 224),
aspect_ratio_factor=(3 / 4, 4 / 3),
crop_area_factor=crop_area_factor,
)
def test_augment_sparse_segmentation_mask(self):
num_classes = 8
input_image_shape = (1, self.height, self.width, 3)
mask_shape = (1, self.height, self.width, 1)
image = tf.random.uniform(shape=input_image_shape, seed=self.seed)
mask = tf.constant(
np.random.randint(2, size=mask_shape) * (num_classes - 1)
)
inputs = {"images": image, "segmentation_masks": mask}
# Crop-only to exactly 1/2 of the size
layer = preprocessing.RandomCropAndResize(
target_size=(150, 150),
aspect_ratio_factor=(1, 1),
crop_area_factor=(1, 1),
seed=self.seed,
)
input_mask_resized = tf.image.crop_and_resize(
mask, [[0, 0, 1, 1]], [0], (150, 150), "nearest"
)
output = layer(inputs, training=True)
self.assertAllClose(output["segmentation_masks"], input_mask_resized)
# Crop to an arbitrary size and make sure we don't do bad interpolation
layer = preprocessing.RandomCropAndResize(
target_size=(233, 233),
aspect_ratio_factor=(3 / 4, 4 / 3),
crop_area_factor=(0.8, 1.0),
seed=self.seed,
)
output = layer(inputs, training=True)
self.assertAllInSet(
ops.convert_to_numpy(output["segmentation_masks"]), [0, 7]
)
def test_augment_one_hot_segmentation_mask(self):
num_classes = 8
input_image_shape = (1, self.height, self.width, 3)
mask_shape = (1, self.height, self.width, 1)
image = tf.random.uniform(shape=input_image_shape, seed=self.seed)
mask = tf.one_hot(
tf.squeeze(
np.random.randint(2, size=mask_shape) * (num_classes - 1),
axis=-1,
),
num_classes,
)
inputs = {"images": image, "segmentation_masks": mask}
# Crop-only to exactly 1/2 of the size
layer = preprocessing.RandomCropAndResize(
target_size=(150, 150),
aspect_ratio_factor=(1, 1),
crop_area_factor=(1, 1),
seed=self.seed,
)
input_mask_resized = tf.image.crop_and_resize(
mask, [[0, 0, 1, 1]], [0], (150, 150), "nearest"
)
output = layer(inputs, training=True)
self.assertAllClose(output["segmentation_masks"], input_mask_resized)
def test_augment_bounding_box_single(self):
image = tf.zeros([20, 20, 3])
boxes = {
"boxes": tf.convert_to_tensor([[0, 0, 1, 1]]),
"classes": tf.convert_to_tensor([0]),
}
input = {"images": image, "bounding_boxes": boxes}
layer = preprocessing.RandomCropAndResize(
target_size=(10, 10),
crop_area_factor=(0.5**2, 0.5**2),
aspect_ratio_factor=(1.0, 1.0),
bounding_box_format="rel_xyxy",
)
output = layer(input, training=True)
expected_output = {
"boxes": tf.convert_to_tensor([[0, 0, 1, 1]], dtype=tf.float32),
"classes": tf.convert_to_tensor([0], dtype=tf.float32),
}
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(
expected_output["boxes"], output["bounding_boxes"]["boxes"]
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_augment_boxes_batched_input(self):
image = tf.zeros([20, 20, 3])
boxes = {
"boxes": tf.convert_to_tensor(
[
[[0, 0, 1, 1], [0, 0, 1, 1]],
[[0, 0, 1, 1], [0, 0, 1, 1]],
]
),
"classes": tf.convert_to_tensor([[0, 0], [0, 0]]),
}
input = {"images": [image, image], "bounding_boxes": boxes}
layer = preprocessing.RandomCropAndResize(
target_size=(18, 18),
crop_area_factor=(0.5**2, 0.5**2),
aspect_ratio_factor=(1.0, 1.0),
bounding_box_format="rel_xyxy",
)
output = layer(input, training=True)
expected_output = {
"boxes": tf.convert_to_tensor(
[
[[0, 0, 1, 1], [0, 0, 1, 1]],
[[0, 0, 1, 1], [0, 0, 1, 1]],
]
),
"classes": tf.convert_to_tensor([[0, 0], [0, 0]]),
}
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(
expected_output["boxes"], output["bounding_boxes"]["boxes"]
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_augment_boxes_ragged(self):
image = tf.zeros([2, 20, 20, 3])
boxes = {
"boxes": tf.ragged.constant(
[[[0, 0, 1, 1], [0, 0, 1, 1]], [[0, 0, 1, 1]]], dtype=tf.float32
),
"classes": tf.ragged.constant([[0, 0], [0]]),
}
input = {"images": image, "bounding_boxes": boxes}
layer = preprocessing.RandomCropAndResize(
target_size=(18, 18),
crop_area_factor=(0.5**2, 0.5**2),
aspect_ratio_factor=(1.0, 1.0),
bounding_box_format="rel_xyxy",
)
output = layer(input, training=True)
# the result boxes will still have the entire image in them
expected_output = {
"boxes": tf.ragged.constant(
[[[0, 0, 1, 1], [0, 0, 1, 1]], [[0, 0, 1, 1]]], dtype=tf.float32
),
"classes": tf.ragged.constant([[0, 0], [0]]),
}
expected_output = bounding_box.to_dense(expected_output)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(
expected_output["boxes"], output["bounding_boxes"]["boxes"]
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
| keras-cv/keras_cv/layers/preprocessing/random_crop_and_resize_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_crop_and_resize_test.py",
"repo_id": "keras-cv",
"token_count": 5074
} | 65 |
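A direct usage sketch of `RandomCropAndResize` with the parameters used throughout the tests above:
```python
import tensorflow as tf

from keras_cv.layers import preprocessing

layer = preprocessing.RandomCropAndResize(
    target_size=(224, 224),
    aspect_ratio_factor=(3 / 4, 4 / 3),
    crop_area_factor=(0.8, 1.0),
)
images = tf.random.uniform((4, 300, 300, 3))
output = layer(images, training=True)  # -> shape (4, 224, 224, 3)
```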
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomSharpness")
class RandomSharpness(VectorizedBaseImageAugmentationLayer):
"""Randomly performs the sharpness operation on given images.
The sharpness operation first performs a blur operation, then blends between
the original image and the blurred image. This operation makes the edges of
an image less sharp than they were in the original image.
References:
- [PIL](https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html)
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image sharpness is impacted. `factor=0.0` makes this layer perform a
no-op operation, while a value of 1.0 uses the smoothed result
entirely. Values between 0 and 1 result in linear interpolation
between the original image and the smoothed image. Values should be
between `0.0` and `1.0`. If a tuple is used, a `factor` is sampled
between the two values for every image augmented. If a single float
is used, a value between `0.0` and the passed float is sampled. In
order to ensure the value is always the same, please pass a tuple
with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
""" # noqa: E501
def __init__(
self,
factor,
value_range,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.value_range = value_range
self.factor = preprocessing.parse_factor(factor)
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
return self.factor(
shape=(batch_size, 1, 1, 1), dtype=self.compute_dtype
)
def augment_images(self, images, transformations, **kwargs):
images = preprocessing.transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
original_images = images
# [1 1 1]
# [1 5 1]
# [1 1 1]
# all divided by 13 is a 3x3 smoothing kernel (PIL's `SMOOTH` filter).
# Correlating or convolving with this filter is equivalent to performing
# a mild blur.
kernel = (
tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=self.compute_dtype,
shape=[3, 3, 1, 1],
)
/ 13.0
)
# Tile across channel dimension.
channels = tf.shape(images)[-1]
kernel = tf.tile(kernel, [1, 1, channels, 1])
strides = [1, 1, 1, 1]
smoothed_image = tf.nn.depthwise_conv2d(
images, kernel, strides, padding="VALID", dilations=[1, 1]
)
smoothed_image = tf.clip_by_value(smoothed_image, 0.0, 255.0)
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(smoothed_image)
padded_mask = tf.pad(mask, [[0, 0], [1, 1], [1, 1], [0, 0]])
padded_smoothed_image = tf.pad(
smoothed_image, [[0, 0], [1, 1], [1, 1], [0, 0]]
)
result = tf.where(
tf.equal(padded_mask, 1), padded_smoothed_image, original_images
)
# Blend the final result.
result = preprocessing.blend(original_images, result, transformations)
result = preprocessing.transform_value_range(
result,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return result
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_ragged_image(self, image, transformation, **kwargs):
images = tf.expand_dims(image, axis=0)
new_transformation = tf.expand_dims(transformation, axis=0)
output = self.augment_images(images, new_transformation)
return tf.squeeze(output, axis=0)
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
| keras-cv/keras_cv/layers/preprocessing/random_sharpness.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_sharpness.py",
"repo_id": "keras-cv",
"token_count": 2411
} | 66 |
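A usage sketch for the layer above; the factor range is illustrative:
```python
import tensorflow as tf

import keras_cv

layer = keras_cv.layers.RandomSharpness(factor=(0.0, 0.8), value_range=(0, 255))
images = tf.random.uniform((2, 224, 224, 3), maxval=255)
sharpened = layer(images, training=True)
```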
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tree
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import config
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend import scope
from keras_cv.utils import preprocessing
H_AXIS = -3
W_AXIS = -2
IMAGES = "images"
LABELS = "labels"
TARGETS = "targets"
BOUNDING_BOXES = "bounding_boxes"
KEYPOINTS = "keypoints"
SEGMENTATION_MASKS = "segmentation_masks"
IS_DICT = "is_dict"
BATCHED = "batched"
USE_TARGETS = "use_targets"
@keras_cv_export("keras_cv.layers.VectorizedBaseImageAugmentationLayer")
class VectorizedBaseImageAugmentationLayer(keras.layers.Layer):
"""Abstract base layer for vectorized image augmentation.
This layer contains base functionalities for preprocessing layers which
augment image-related data, e.g. images, labels, and bounding boxes.
Subclasses can avoid making certain mistakes and reduce code
duplication.
This layer requires you to implement one method: `augment_images()`, which
augments a batch of images during training. There are a few additional
methods that you can implement for added functionality on the layer:
`augment_labels()`, which handles label augmentation if the layer supports
that.
`augment_bounding_boxes()`, which handles the bounding box augmentation, if
the layer supports that.
`get_random_transformations()`, which should produce a batch of random
transformation settings. The transformation object, which must be a batched
Tensor or a dictionary where each input is a batched Tensor, will be passed
to `augment_images`, `augment_labels` and `augment_bounding_boxes`, to
coordinate the randomness behavior, eg, in the RandomFlip layer, the image
and bounding_boxes should be changed in the same way.
The `call()` method supports two formats of inputs:
1. Single image tensor with 3D (HWC) or 4D (NHWC) format.
2. A dict of tensors with stable keys. The supported keys are:
`"images"`, `"labels"` and `"bounding_boxes"` at the moment. We might add
more keys in future when we support more types of augmentation.
The output of the `call()` will be in two formats, which will be the same
structure as the inputs.
The `call()` will unpack the inputs, forward to the correct function, and
pack the output back to the same structure as the inputs.
By default, the dense or ragged status of the output will be preserved.
However, you can override this behavior by setting
`self.force_output_dense_images = True`,
`self.force_output_dense_segmentation_masks = True` in your `__init__()`
method. When enabled, images and segmentation masks will be converted to
dense tensor by `to_tensor()` if ragged.
```python
class SubclassLayer(VectorizedBaseImageAugmentationLayer):
def __init__(self):
super().__init__()
self.force_output_dense_images = True
self.force_output_dense_segmentation_masks = True
```
Note that since the randomness is also a common functionality, this layer
also includes a `tf.random.Generator`, which can be used to
produce the random numbers. The random number generator is stored in the
`self._random_generator` attribute.
"""
def __init__(self, seed=None, **kwargs):
super().__init__(**kwargs)
if seed:
self._random_generator = tf.random.Generator.from_seed(seed=seed)
else:
self._random_generator = tf.random.get_global_generator()
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
@property
def force_output_dense_images(self):
"""Control whether to force outputting of dense images."""
return getattr(self, "_force_output_dense_images", False)
@force_output_dense_images.setter
def force_output_dense_images(self, force_output_dense_images):
self._force_output_dense_images = force_output_dense_images
@property
def force_output_dense_segmentation_masks(self):
"""Control whether to force outputting of dense segmentation masks."""
return getattr(self, "_force_output_dense_segmentation_masks", False)
@force_output_dense_segmentation_masks.setter
def force_output_dense_segmentation_masks(
self, force_output_dense_segmentation_masks
):
self._force_output_dense_segmentation_masks = (
force_output_dense_segmentation_masks
)
def augment_ragged_image(self, image, transformation, **kwargs):
"""Augment an image from a ragged image batch during training.
        This method accepts a single dense image Tensor, and returns a dense
        image. The resulting images are then stacked back into a ragged image
        batch. The behavior of this method should be identical to that of
        `augment_images()`, except that it operates on a single image rather
        than on a whole batch.
Args:
image: a single image from the batch
transformation: a single transformation sampled from
`get_random_transformations()`.
kwargs: all the other call arguments (i.e. bounding_boxes, labels,
etc.).
Returns:
Augmented image.
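        Example (a minimal sketch, not required by the API): assuming the
        transformation is a plain tensor and the layer only needs the image,
        a subclass can often reuse its vectorized `augment_images()` by
        adding a batch dimension of one:
        ```python
        def augment_ragged_image(self, image, transformation, **kwargs):
            # Treat the single image as a batch of size 1.
            images = image[tf.newaxis, ...]
            transformations = transformation[tf.newaxis, ...]
            images = self.augment_images(
                images, transformations=transformations
            )
            return tf.squeeze(images, axis=0)
        ```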
"""
raise NotImplementedError(
"A ragged image batch was passed to layer of type "
f"`{type(self).__name__}`. This layer does not implement "
"`augment_ragged_image()`. If this is a `keras_cv`, open a GitHub "
"issue requesting Ragged functionality on the layer titled: "
f"'`{type(self).__name__}`: ragged image support'. If this is a "
"custom layer, implement the `augment_ragged_image()` method."
)
def compute_ragged_image_signature(self, images):
"""Computes the output image signature for the `augment_image()`
function.
Must be overridden to return tensors with different shapes than the
input images. By default, returns either a `tf.RaggedTensorSpec`
matching the input image spec, or a `tf.TensorSpec` matching the input
image spec.
"""
ragged_spec = tf.RaggedTensorSpec(
shape=images.shape[1:],
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
def augment_images(self, images, transformations, **kwargs):
"""Augment a batch of images during training.
Args:
            images: 4D image input tensor to the layer. Forwarded from
                `layer.call()`. This should generally have the shape
                [B, H, W, C].
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
            output 4D tensor, which will be forwarded to `layer.call()`.
"""
raise NotImplementedError()
def augment_labels(self, labels, transformations, **kwargs):
"""Augment a batch of labels during training.
Args:
            labels: 2D label tensor to the layer. Forwarded from
                `layer.call()`.
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
            output 2D tensor, which will be forwarded to `layer.call()`.
"""
raise NotImplementedError()
def augment_targets(self, targets, transformations, **kwargs):
"""Augment a batch of targets during training.
Args:
            targets: 2D target tensor to the layer. Forwarded from
                `layer.call()`.
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
            output 2D tensor, which will be forwarded to `layer.call()`.
"""
return self.augment_labels(targets, transformations, **kwargs)
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
"""Augment bounding boxes for one image during training.
Args:
bounding_boxes: 3D bounding boxes to the layer. Forwarded from
`call()`.
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
            output 3D tensor, which will be forwarded to `layer.call()`.
"""
raise NotImplementedError()
def augment_keypoints(self, keypoints, transformations, **kwargs):
"""Augment a batch of keypoints for one image during training.
Args:
keypoints: 3D keypoints input tensor to the layer. Forwarded from
`layer.call()`. Shape should be [batch, num_keypoints, 2] in the
specified keypoint format.
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
            output 3D tensor, which will be forwarded to `layer.call()`.
"""
raise NotImplementedError()
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
"""Augment a batch of images' segmentation masks during training.
Args:
segmentation_masks: 4D segmentation mask input tensor to the layer.
This should generally have the shape [B, H, W, 1], or in some cases
[B, H, W, C] for multilabeled data. Forwarded from `layer.call()`.
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
            output 4D tensor containing the augmented segmentation mask, which
            will be forwarded to `layer.call()`.
"""
raise NotImplementedError()
def get_random_transformation_batch(
self,
batch_size,
images=None,
labels=None,
bounding_boxes=None,
keypoints=None,
segmentation_masks=None,
):
"""Produce random transformations config for a batch of inputs.
        This is used to produce the same randomness between
        image/label/bounding_box.
        Args:
            batch_size: the number of transformation configurations to sample.
            images: batched image tensor from inputs.
            labels: optional batched label tensor from inputs.
            bounding_boxes: optional batched bounding boxes from inputs.
            keypoints: optional batched keypoints tensor from inputs.
            segmentation_masks: optional batched segmentation mask tensor from
                inputs.
Returns:
Any type of object, which will be forwarded to `augment_images`,
`augment_labels` and `augment_bounding_boxes` as the `transformations`
parameter.
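        Example (a minimal sketch of what a subclass might return; the exact
        contents are entirely up to the layer):
        ```python
        def get_random_transformation_batch(self, batch_size, **kwargs):
            # One random scalar per image, consumed later by augment_images().
            return self._random_generator.uniform(
                shape=(batch_size,), minval=0.0, maxval=1.0
            )
        ```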
"""
        # Required to work with map_fn in the ragged case.
return tf.zeros((batch_size))
def _unwrap_ragged_image_call(self, inputs):
images = inputs.get(IMAGES, None)
labels = inputs.get(LABELS, None)
bounding_boxes = inputs.get(BOUNDING_BOXES, None)
keypoints = inputs.get(KEYPOINTS, None)
segmentation_masks = inputs.get(SEGMENTATION_MASKS, None)
transformation = inputs.get("transformations")
images = images.to_tensor()
images = self.augment_ragged_image(
image=images,
label=labels,
bounding_boxes=bounding_boxes,
keypoints=keypoints,
segmentation_mask=segmentation_masks,
transformation=transformation,
)
return tf.RaggedTensor.from_tensor(images)
def _batch_augment(self, inputs):
images = inputs.get(IMAGES, None)
raw_images = images
labels = inputs.get(LABELS, None)
bounding_boxes = inputs.get(BOUNDING_BOXES, None)
keypoints = inputs.get(KEYPOINTS, None)
segmentation_masks = inputs.get(SEGMENTATION_MASKS, None)
batch_size = tf.shape(images)[0]
transformations = self.get_random_transformation_batch(
batch_size,
images=images,
labels=labels,
bounding_boxes=bounding_boxes,
keypoints=keypoints,
segmentation_masks=segmentation_masks,
)
if isinstance(images, tf.RaggedTensor):
inputs_for_raggeds = {"transformations": transformations, **inputs}
images = tf.map_fn(
self._unwrap_ragged_image_call,
inputs_for_raggeds,
fn_output_signature=self.compute_ragged_image_signature(images),
)
else:
images = self.augment_images(
images,
transformations=transformations,
bounding_boxes=bounding_boxes,
labels=labels,
)
if (
isinstance(images, tf.RaggedTensor)
and self.force_output_dense_images
):
images = images.to_tensor()
result = {IMAGES: images}
if labels is not None:
labels = self.augment_targets(
labels,
transformations=transformations,
bounding_boxes=bounding_boxes,
images=images,
raw_images=raw_images,
)
result[LABELS] = labels
if bounding_boxes is not None:
bounding_boxes = self.augment_bounding_boxes(
bounding_boxes,
transformations=transformations,
labels=labels,
images=images,
raw_images=raw_images,
)
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
result[BOUNDING_BOXES] = bounding_boxes
if keypoints is not None:
keypoints = self.augment_keypoints(
keypoints,
transformations=transformations,
labels=labels,
bounding_boxes=bounding_boxes,
images=images,
raw_images=raw_images,
)
result[KEYPOINTS] = keypoints
if segmentation_masks is not None:
segmentation_masks = self.augment_segmentation_masks(
segmentation_masks,
transformations=transformations,
labels=labels,
bounding_boxes=bounding_boxes,
images=images,
raw_images=raw_images,
)
if (
isinstance(segmentation_masks, tf.RaggedTensor)
and self.force_output_dense_segmentation_masks
):
segmentation_masks = segmentation_masks.to_tensor()
result[SEGMENTATION_MASKS] = segmentation_masks
# preserve any additional inputs unmodified by this layer.
for key in inputs.keys() - result.keys():
result[key] = inputs[key]
return result
def call(self, inputs):
# try to convert a given backend native tensor to TensorFlow tensor
# before passing it over to TFDataScope
is_tf_backend = config.backend() == "tensorflow"
is_in_tf_graph = not tf.executing_eagerly()
contains_ragged = lambda y: any(
tree.map_structure(
lambda x: isinstance(x, (tf.RaggedTensor, tf.SparseTensor)),
tree.flatten(y),
)
)
inputs_contain_ragged = contains_ragged(inputs)
if not is_tf_backend and not inputs_contain_ragged:
inputs = tree.map_structure(
lambda x: tf.convert_to_tensor(x), inputs
)
with scope.TFDataScope():
inputs = self._ensure_inputs_are_compute_dtype(inputs)
inputs, metadata = self._format_inputs(inputs)
images = inputs[IMAGES]
if images.shape.rank == 3 or images.shape.rank == 4:
outputs = self._format_output(
self._batch_augment(inputs), metadata
)
else:
raise ValueError(
"Image augmentation layers are expecting inputs to be "
"rank 3 (HWC) or 4D (NHWC) tensors. Got shape: "
f"{images.shape}"
)
# convert the outputs to backend native tensors if none of them
# contain RaggedTensors. Note that if the user passed in Raggeds
# but the outputs are dense, we still don't want to convert to
# backend native tensors. This is to avoid breaking TF data
# pipelines that can't easily be ported to become backend
# agnostic.
if not is_tf_backend and not is_in_tf_graph:
if not inputs_contain_ragged and not contains_ragged(outputs):
outputs = tree.map_structure(
# some layers return None, handle that case when
# converting to tensors
lambda x: ops.convert_to_tensor(x) if x is not None else x,
outputs,
)
return outputs
def _format_inputs(self, inputs):
metadata = {IS_DICT: True, USE_TARGETS: False}
if tf.is_tensor(inputs):
# single image input tensor
metadata[IS_DICT] = False
inputs = {IMAGES: inputs}
else:
# Copy the input dict before we mutate it.
inputs = dict(inputs)
metadata[BATCHED] = inputs["images"].shape.rank == 4
if inputs["images"].shape.rank == 3:
for key in list(inputs.keys()):
if key == BOUNDING_BOXES:
inputs[BOUNDING_BOXES]["boxes"] = tf.expand_dims(
inputs[BOUNDING_BOXES]["boxes"], axis=0
)
inputs[BOUNDING_BOXES]["classes"] = tf.expand_dims(
inputs[BOUNDING_BOXES]["classes"], axis=0
)
else:
inputs[key] = tf.expand_dims(inputs[key], axis=0)
if not isinstance(inputs, dict):
raise ValueError(
"Expect the inputs to be image tensor or dict. Got "
f"inputs={inputs}"
)
if BOUNDING_BOXES in inputs:
inputs[BOUNDING_BOXES] = self._format_bounding_boxes(
inputs[BOUNDING_BOXES]
)
if isinstance(inputs, dict) and TARGETS in inputs:
# TODO(scottzhu): Check if it only contains the valid keys
inputs[LABELS] = inputs[TARGETS]
del inputs[TARGETS]
metadata[USE_TARGETS] = True
return inputs, metadata
return inputs, metadata
def _format_output(self, output, metadata):
if not metadata[BATCHED]:
for key in list(output.keys()):
if key == BOUNDING_BOXES:
output[BOUNDING_BOXES]["boxes"] = tf.squeeze(
output[BOUNDING_BOXES]["boxes"], axis=0
)
output[BOUNDING_BOXES]["classes"] = tf.squeeze(
output[BOUNDING_BOXES]["classes"], axis=0
)
else:
output[key] = tf.squeeze(output[key], axis=0)
if not metadata[IS_DICT]:
return output[IMAGES]
elif metadata[USE_TARGETS]:
output[TARGETS] = output[LABELS]
del output[LABELS]
return output
def _ensure_inputs_are_compute_dtype(self, inputs):
if not isinstance(inputs, dict):
return preprocessing.ensure_tensor(
inputs,
self.compute_dtype,
)
# Copy the input dict before we mutate it.
inputs = dict(inputs)
inputs[IMAGES] = preprocessing.ensure_tensor(
inputs[IMAGES],
self.compute_dtype,
)
if LABELS in inputs:
inputs[LABELS] = preprocessing.ensure_tensor(
inputs[LABELS],
self.compute_dtype,
)
if KEYPOINTS in inputs:
inputs[KEYPOINTS] = preprocessing.ensure_tensor(
inputs[KEYPOINTS],
self.compute_dtype,
)
if SEGMENTATION_MASKS in inputs:
inputs[SEGMENTATION_MASKS] = preprocessing.ensure_tensor(
inputs[SEGMENTATION_MASKS],
self.compute_dtype,
)
if BOUNDING_BOXES in inputs:
inputs[BOUNDING_BOXES]["boxes"] = preprocessing.ensure_tensor(
inputs[BOUNDING_BOXES]["boxes"],
self.compute_dtype,
)
inputs[BOUNDING_BOXES]["classes"] = preprocessing.ensure_tensor(
inputs[BOUNDING_BOXES]["classes"],
self.compute_dtype,
)
return inputs
def _format_bounding_boxes(self, bounding_boxes):
# We can't catch the case where this is None, sometimes RaggedTensor
# drops this dimension.
if "classes" not in bounding_boxes:
raise ValueError(
"Bounding boxes are missing class_id. If you would like to pad "
"the bounding boxes with class_id, use: "
"`bounding_boxes['classes'] = "
"tf.ones_like(bounding_boxes['boxes'])`."
)
return bounding_boxes
| keras-cv/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer.py",
"repo_id": "keras-cv",
"token_count": 9889
} | 67 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_dropping_points import ( # noqa: E501
GlobalRandomDroppingPoints,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalDropPointsTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=0.5)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_specific_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=0.5)
point_clouds = np.random.random(size=(1, 50, 2)).astype("float32")
point_clouds = np.concatenate([point_clouds, point_clouds], axis=0)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
# The augmented point clouds in the first frame should be the same as
# the augmented point clouds in the second frame.
self.assertAllClose(outputs[POINT_CLOUDS][0], outputs[POINT_CLOUDS][1])
def test_not_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=0.0)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_drop_all_point_clouds(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=1.0)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs[POINT_CLOUDS] * 0.0, outputs[POINT_CLOUDS])
def test_exclude_all_points(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=1.0, exclude_classes=1)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
exclude_classes = np.ones(shape=(2, 50, 1)).astype("float32")
point_clouds = np.concatenate([point_clouds, exclude_classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_exclude_the_first_half_points(self):
add_layer = GlobalRandomDroppingPoints(
drop_rate=1.0, exclude_classes=[1, 2]
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
class_1 = np.ones(shape=(2, 10, 1)).astype("float32")
class_2 = np.ones(shape=(2, 15, 1)).astype("float32") * 2
classes = np.concatenate(
[class_1, class_2, np.zeros(shape=(2, 25, 1)).astype("float32")],
axis=1,
)
point_clouds = np.concatenate([point_clouds, classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(
inputs[POINT_CLOUDS][:, 25:, :] * 0.0,
outputs[POINT_CLOUDS][:, 25:, :],
)
self.assertAllClose(
inputs[POINT_CLOUDS][:, :25, :], outputs[POINT_CLOUDS][:, :25, :]
)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=0.5)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_dropping_points_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_dropping_points_test.py",
"repo_id": "keras-cv",
"token_count": 2057
} | 68 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.swap_background import (
SwapBackground,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
ADDITIONAL_POINT_CLOUDS = base_augmentation_layer_3d.ADDITIONAL_POINT_CLOUDS
ADDITIONAL_BOUNDING_BOXES = base_augmentation_layer_3d.ADDITIONAL_BOUNDING_BOXES
class SwapBackgroundTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = SwapBackground()
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
additional_point_clouds = np.array(
[
[
[0, 2, 1, 3, 4],
[0, 0, 2, 0, 2],
[0, 11, 2, 3, 4],
[100, 101, 2, 3, 4],
[10, 10, 10, 10, 10],
]
]
* 2
).astype("float32")
additional_bounding_boxes = np.array(
[
[
[0, 0, 1, 4, 4, 4, 0, 1],
[100, 100, 2, 5, 5, 5, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
ADDITIONAL_POINT_CLOUDS: additional_point_clouds,
ADDITIONAL_BOUNDING_BOXES: additional_bounding_boxes,
}
outputs = add_layer(inputs)
# The following points in additional_point_clouds.
# [0, 2, 1, 3, 4], -> kept because it is in additional_point_clouds
# [0, 0, 1, 4, 4, 4, 0, 1].
# [0, 0, 2, 0, 2] -> removed because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
# [0, 11, 2, 3, 4] -> removed because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
# [100, 101, 2, 3, 4] -> kept because it is in additional_point_clouds
# [100, 100, 2, 5, 5, 5, 0, 1].
# [10, 10, 10, 10, 10] -> removed because it is a background point (not
# in any bounding_boxes and additional_point_clouds).
# The following points in point_clouds.
# [0, 1, 2, 3, 4] -> removed because it is in bounding_boxes
# [0, 0, 0, 4, 4, 4, 0, 1].
# [10, 1, 2, 3, 4] -> kept because it is a background point (not in any
# bounding_boxes and additional_point_clouds).
# [0, -1, 2, 3, 4] -> removed because it overlaps with
# additional_bounding_boxes [0, 0, 1, 4, 4, 4, 0, 1].
# [100, 100, 2, 3, 4] -> removed because it overlaps with
# additional_bounding_boxes [100, 100, 2, 5, 5, 5, 0, 1].
# [20, 20, 21, 1, 0] -> kept because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
augmented_point_clouds = np.array(
[
[
[0, 2, 1, 3, 4],
[100, 101, 2, 3, 4],
[10, 1, 2, 3, 4],
[20, 20, 21, 1, 0],
[0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 1, 4, 4, 4, 0, 1],
[100, 100, 2, 5, 5, 5, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(
inputs[ADDITIONAL_POINT_CLOUDS], outputs[ADDITIONAL_POINT_CLOUDS]
)
self.assertAllClose(
inputs[ADDITIONAL_BOUNDING_BOXES],
outputs[ADDITIONAL_BOUNDING_BOXES],
)
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = SwapBackground()
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
]
* 3
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
additional_point_clouds = np.array(
[
[
[
[0, 2, 1, 3, 4],
[0, 0, 2, 0, 2],
[0, 11, 2, 3, 4],
[100, 101, 2, 3, 4],
[10, 10, 10, 10, 10],
]
]
* 2
]
* 3
).astype("float32")
additional_bounding_boxes = np.array(
[
[
[
[0, 0, 1, 4, 4, 4, 0, 1],
[100, 100, 2, 5, 5, 5, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
ADDITIONAL_POINT_CLOUDS: additional_point_clouds,
ADDITIONAL_BOUNDING_BOXES: additional_bounding_boxes,
}
outputs = add_layer(inputs)
# The following points in additional_point_clouds.
# [0, 2, 1, 3, 4], -> kept because it is in additional_point_clouds
# [0, 0, 1, 4, 4, 4, 0, 1].
# [0, 0, 2, 0, 2] -> removed because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
# [0, 11, 2, 3, 4] -> removed because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
# [100, 101, 2, 3, 4] -> kept because it is in additional_point_clouds
# [100, 100, 2, 5, 5, 5, 0, 1].
# [10, 10, 10, 10, 10] -> removed because it is a background point (not
# in any bounding_boxes and additional_point_clouds).
# The following points in point_clouds.
        # [0, 1, 2, 3, 4] -> removed because it is in bounding_boxes
# [0, 0, 0, 4, 4, 4, 0, 1].
# [10, 1, 2, 3, 4] -> kept because it is a background point (not in any
# bounding_boxes and additional_point_clouds).
# [0, -1, 2, 3, 4] -> removed because it overlaps with
# additional_bounding_boxes [0, 0, 1, 4, 4, 4, 0, 1].
# [100, 100, 2, 3, 4] -> removed because it overlaps with
# additional_bounding_boxes [100, 100, 2, 5, 5, 5, 0, 1].
# [20, 20, 21, 1, 0] -> kept because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
augmented_point_clouds = np.array(
[
[
[
[0, 2, 1, 3, 4],
[100, 101, 2, 3, 4],
[10, 1, 2, 3, 4],
[20, 20, 21, 1, 0],
[0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[
[0, 0, 1, 4, 4, 4, 0, 1],
[100, 100, 2, 5, 5, 5, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
self.assertAllClose(
inputs[ADDITIONAL_POINT_CLOUDS], outputs[ADDITIONAL_POINT_CLOUDS]
)
self.assertAllClose(
inputs[ADDITIONAL_BOUNDING_BOXES],
outputs[ADDITIONAL_BOUNDING_BOXES],
)
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/swap_background_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/swap_background_test.py",
"repo_id": "keras-cv",
"token_count": 6396
} | 69 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
class MLP(keras.layers.Layer):
"""A MLP block with architecture
`input_dim -> [hidden_dim] * (num_layers - 1) -> output_dim`.
Args:
hidden_dim (int): The number of units in the hidden layers.
output_dim (int): The number of units in the output layer.
num_layers (int): The total number of dense layers to use.
activation (str): Activation to use in the hidden layers.
Default is `"relu"`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
- [Detectron2](https://github.com/facebookresearch/detectron2)
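    Example (an illustrative sketch; shapes are arbitrary):
    ```python
    mlp = MLP(hidden_dim=256, output_dim=128, num_layers=3)
    x = np.ones((2, 64))
    y = mlp(x)  # shape: (2, 128)
    ```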
""" # noqa: E501
def __init__(
self, hidden_dim, output_dim, num_layers, activation="relu", **kwargs
):
super().__init__(**kwargs)
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.num_layers = num_layers
self.activation = activation
h = [hidden_dim] * (num_layers - 1)
self.dense_net = []
for hidden_dim in h:
self.dense_net.append(keras.layers.Dense(hidden_dim))
self.dense_net.append(keras.layers.Activation(activation))
self.dense_net.append(keras.layers.Dense(output_dim))
self.dense_net = keras.models.Sequential(self.dense_net)
def build(self, input_shape):
self.dense_net.build(input_shape)
self.built = True
def call(self, x):
return self.dense_net(x)
def get_config(self):
config = super().get_config()
config.update(
{
"hidden_dim": self.hidden_dim,
"output_dim": self.output_dim,
"num_layers": self.num_layers,
"activation": self.activation,
}
)
return config
@keras_cv_export(
"keras_cv.layers.AddRelativePositionalEmbedding", package="keras_cv.layers"
)
class AddRelativePositionalEmbedding(keras.layers.Layer):
def __init__(self, input_size, key_dim, **kwargs):
super().__init__(**kwargs)
self.input_size = input_size
self.key_dim = key_dim
self.rel_pos_h = self.add_weight(
name="rel_pos_h",
shape=(2 * self.input_size[0] - 1, self.key_dim),
initializer="zeros",
trainable=True,
)
self.rel_pos_w = self.add_weight(
name="rel_pos_w",
shape=(2 * self.input_size[1] - 1, self.key_dim),
initializer="zeros",
trainable=True,
)
self.built = True
def _get_rel_pos(self, query_size, key_size, rel_pos):
"""
Get relative positional embeddings according to the relative positions
of query and key sizes.
Args:
            query_size (int): Spatial sequence size of the queries.
            key_size (int): Spatial sequence size of the keys.
rel_pos (tensor): Relative positional embedding tensor.
Returns:
tensor: Extracted positional embeddings according to relative
positions.
"""
max_rel_dist = 2 * max(query_size, key_size) - 1
if ops.shape(rel_pos)[0] != max_rel_dist:
rel_pos_resized = ops.image.resize(
image=ops.reshape(
rel_pos,
(1, ops.shape(rel_pos)[0], ops.shape(rel_pos)[1], 1),
),
size=(max_rel_dist, ops.shape(rel_pos)[1]),
interpolation="bilinear",
)
rel_pos_resized = ops.squeeze(rel_pos_resized, axis=(0, -1))
return rel_pos_resized
else:
rel_pos_resized = rel_pos
query_coordinates = ops.cast(
ops.arange(query_size), dtype=self.compute_dtype
)[:, None] * (max(key_size / query_size, 1.0))
key_coordinates = ops.cast(
ops.arange(key_size), dtype=self.compute_dtype
)[None, :] * (max(query_size / key_size, 1.0))
relative_coordinates = (query_coordinates - key_coordinates) + (
key_size - 1
) * max(query_size / key_size, 1.0)
relative_coordinates = ops.cast(relative_coordinates, dtype="int32")
return ops.take(rel_pos_resized, relative_coordinates, 0)
def call(self, attention_map, queries, query_size, key_size):
"""
        Calculate decomposed Relative Positional Embeddings, following the
        MViTv2 paper.
Args:
attention_map (tensor): Attention map.
queries (tensor): Queries in the attention layer with shape
`(B, q_h * q_w, C)`.
query_size (tuple[int, int]): Spatial sequence size of queries with
`(q_h, q_w)`.
key_size (tuple[int, int]): Spatial sequence size of keys with
`(k_h, k_w)`.
Returns:
tensor: attention map with added relative positional embeddings.
References:
- https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa: E501
"""
query_height, query_width = query_size[0], query_size[1]
key_height, key_width = key_size[0], key_size[1]
rel_heights = self._get_rel_pos(
query_height, key_height, self.rel_pos_h
)
rel_widths = self._get_rel_pos(query_width, key_width, self.rel_pos_w)
shape = ops.shape(queries)
B, C = shape[0], shape[2]
rel_queries = ops.reshape(queries, (B, query_height, query_width, C))
rel_heights = ops.einsum("bhwc,hkc->bhwk", rel_queries, rel_heights)
rel_widths = ops.einsum("bhwc,wkc->bhwk", rel_queries, rel_widths)
attention_map = ops.reshape(
attention_map, (B, query_height, query_width, key_height, key_width)
)
attention_map = attention_map + rel_heights[..., :, None]
attention_map = attention_map + rel_widths[..., None, :]
attention_map = ops.reshape(
attention_map,
(B, query_height * query_width, key_height * key_width),
)
return attention_map
def get_config(self):
config = super().get_config()
config.update({"input_size": self.input_size, "key_dim": self.key_dim})
return config
@keras_cv_export(
"keras_cv.layers.MultiHeadAttentionWithRelativePE",
package="keras_cv.layers",
)
class MultiHeadAttentionWithRelativePE(keras.layers.Layer):
"""Multi-head Attention block with relative position embeddings.
Args:
num_heads (int): Number of attention heads.
key_dim (int): Size of each attention head for query, key, and
value.
use_bias (bool, optional): Whether to use bias when projecting
the queries, keys, and values. Defaults to `True`.
use_rel_pos (bool, optional): Whether to use relative positional
embeddings or not. Defaults to `False`.
input_size (tuple[int, int], optional): Size of the input image.
Must be provided when using relative positional embeddings.
Defaults to `None`.
Raises:
ValueError: When `input_size = None` with `use_rel_pos = True`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
- [Detectron2](https://github.com/facebookresearch/detectron2)
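    Example (an illustrative sketch; the channel count of the input must
    equal `num_heads * key_dim`):
    ```python
    layer = MultiHeadAttentionWithRelativePE(
        num_heads=8, key_dim=64, use_rel_pos=True, input_size=(14, 14)
    )
    x = np.ones(shape=(1, 14, 14, 8 * 64))
    y = layer(x)  # shape: (1, 14, 14, 512)
    ```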
""" # noqa: E501
def __init__(
self,
num_heads,
key_dim,
use_bias=True,
use_rel_pos=False,
input_size=None,
**kwargs
):
super().__init__(**kwargs)
self.num_heads = num_heads
self.key_dim = key_dim
self.scale = self.key_dim**-0.5
self.use_bias = use_bias
self.input_size = input_size
self.use_rel_pos = use_rel_pos
self.qkv = keras.layers.Dense(
key_dim * self.num_heads * 3, use_bias=self.use_bias
)
self.projection = keras.layers.Dense(key_dim * self.num_heads)
if self.use_rel_pos:
if input_size is None:
raise ValueError(
"Input size must be provided if using relative "
"positional encoding."
)
            self.add_decomposed_relative_pe = AddRelativePositionalEmbedding(
self.input_size, self.key_dim
)
def build(self, input_shape=None):
self.qkv.build([self.key_dim * self.num_heads])
self.projection.build([self.key_dim * self.num_heads])
self.built = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, x):
shape = ops.shape(x)
B, H, W, C = shape[0], shape[1], shape[2], shape[3]
qkv = ops.transpose(
ops.reshape(
self.qkv(x), (B, H * W, 3, self.num_heads, self.key_dim)
),
axes=(2, 0, 3, 1, 4),
)
qkv = ops.reshape(qkv, (3, B * self.num_heads, H * W, self.key_dim))
queries, keys, values = ops.unstack(qkv, axis=0)
attention_map = (queries * self.scale) @ ops.transpose(
keys, axes=(0, 2, 1)
)
if self.use_rel_pos:
            attention_map = self.add_decomposed_relative_pe(
attention_map,
queries=queries,
query_size=(H, W),
key_size=(H, W),
)
attention_map = ops.softmax(attention_map, axis=-1)
x = ops.reshape(
attention_map @ values, (B, self.num_heads, H, W, self.key_dim)
)
x = ops.transpose(x, axes=(0, 2, 3, 1, 4))
x = ops.reshape(x, (B, H, W, C))
x = self.projection(x)
return x
def get_config(self):
config = super().get_config()
config.update(
{
"num_heads": self.num_heads,
"key_dim": self.key_dim,
"use_bias": self.use_bias,
"use_rel_pos": self.use_rel_pos,
"input_size": self.input_size,
}
)
return config
@keras_cv_export(
"keras_cv.layers.WindowPartitioning", package="keras_cv.layers"
)
class WindowPartitioning(keras.layers.Layer):
def __init__(self, window_size, **kwargs):
super().__init__(**kwargs)
self.window_size = window_size
self.built = True
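    # Illustrative round-trip (a sketch only; shapes are examples):
    #   layer = WindowPartitioning(window_size=7)
    #   windows, (H_pad, W_pad) = layer.partition(x)   # x: (B, H, W, C)
    #   x_restored = layer.unpartition(windows, (H_pad, W_pad), (H, W))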
def partition(self, x):
shape = ops.shape(x)
B, H, W, C = shape[0], shape[1], shape[2], shape[3]
pad_height = (
self.window_size - H % self.window_size
) % self.window_size
pad_width = (self.window_size - W % self.window_size) % self.window_size
if pad_height > 0 or pad_width > 0:
x = ops.pad(x, ((0, 0), (0, pad_height), (0, pad_width), (0, 0)))
H_padded, W_padded = H + pad_height, W + pad_width
x = ops.reshape(
x,
(
B,
H_padded // self.window_size,
self.window_size,
W_padded // self.window_size,
self.window_size,
C,
),
)
windows = ops.reshape(
ops.transpose(x, axes=(0, 1, 3, 2, 4, 5)),
(-1, self.window_size, self.window_size, C),
)
return windows, (H_padded, W_padded)
def unpartition(self, windows, HW_padded, HW):
H_padded, W_padded = HW_padded
H, W = HW
B = ops.shape(windows)[0] // (
(H_padded // self.window_size) * (W_padded // self.window_size)
)
x = ops.reshape(
windows,
(
B,
H_padded // self.window_size,
W_padded // self.window_size,
self.window_size,
self.window_size,
-1,
),
)
x = ops.reshape(
ops.transpose(x, axes=(0, 1, 3, 2, 4, 5)),
(B, H_padded, W_padded, -1),
)
return x[:, :H, :W, :]
def get_config(self):
config = super().get_config()
config.update({"window_size": self.window_size})
return config
@keras_cv_export(
"keras_cv.layers.WindowedTransformerEncoder", package="keras_cv.layers"
)
class WindowedTransformerEncoder(keras.layers.Layer):
"""Transformer blocks with support of window attention and residual
propagation blocks.
Args:
project_dim (int): the dimensionality of the projection of the
encoder, and output of the `MultiHeadAttention`.
mlp_dim (int): the intermediate dimensionality of the MLP head before
projecting to `project_dim`.
num_heads (int): the number of heads for the `MultiHeadAttention`
layer.
use_bias (bool, optional): Whether to use bias to project the keys,
queries, and values in the attention layer. Defaults to `True`.
use_rel_pos (bool, optional): Whether to use relative positional
            encodings in the attention layer. Defaults to `False`.
window_size (int, optional): Window size for windowed attention.
Defaults to `0`.
input_size (tuple[int, int], optional): Height and width of the input
image as a tuple of integers. Must be provided when using relative
positional embeddings. Defaults to `None`.
        activation (str, optional): the activation function to apply in the
            MLP head. Can be a string identifier or a callable. Defaults to
            `"gelu"`.
layer_norm_epsilon (float, optional): The epsilon to use in the layer
normalization layers. Defaults to `1e-6`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
- [Detectron2](https://github.com/facebookresearch/detectron2)
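    Example (an illustrative sketch; `project_dim` must be divisible by
    `num_heads`):
    ```python
    block = WindowedTransformerEncoder(
        project_dim=256, mlp_dim=1024, num_heads=8, window_size=7
    )
    x = np.ones((1, 14, 14, 256))
    y = block(x)  # shape: (1, 14, 14, 256)
    ```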
""" # noqa: E501
def __init__(
self,
project_dim,
mlp_dim,
num_heads,
use_bias=True,
use_rel_pos=False,
window_size=0,
input_size=None,
activation="gelu",
layer_norm_epsilon=1e-6,
**kwargs
):
super().__init__(**kwargs)
self.project_dim = project_dim
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.use_bias = use_bias
self.input_size = input_size
self.activation = activation
self.layer_norm_epsilon = layer_norm_epsilon
self.window_size = window_size
self.use_rel_pos = use_rel_pos
self.layer_norm1 = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon
)
self.layer_norm2 = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon
)
self.attention = MultiHeadAttentionWithRelativePE(
num_heads=self.num_heads,
key_dim=self.project_dim // self.num_heads,
use_bias=use_bias,
use_rel_pos=use_rel_pos,
input_size=(
input_size if window_size == 0 else (window_size, window_size)
),
)
self.mlp_block = MLP(
mlp_dim,
project_dim,
num_layers=2,
activation="gelu",
)
self.window_partitioning = WindowPartitioning(window_size)
def build(self, input_shape=None):
self.layer_norm1.build([None, None, None, self.project_dim])
self.layer_norm2.build([None, None, None, self.project_dim])
self.attention.build()
self.mlp_block.build([None, None, None, self.project_dim])
self.built = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, x):
shortcut = x
x = self.layer_norm1(x)
# Window Partition
if self.window_size > 0:
H, W = ops.shape(x)[1], ops.shape(x)[2]
x, HW_padded = self.window_partitioning.partition(x)
x = self.attention(x)
# Reverse Window Partition
if self.window_size > 0:
x = self.window_partitioning.unpartition(
x, HW_padded=HW_padded, HW=(H, W)
)
x = shortcut + x
x = x + self.mlp_block(self.layer_norm2(x))
return x
def get_config(self):
config = super().get_config()
config.update(
{
"project_dim": self.project_dim,
"mlp_dim": self.mlp_dim,
"num_heads": self.num_heads,
"use_bias": self.use_bias,
"use_rel_pos": self.use_rel_pos,
"window_size": self.window_size,
"input_size": self.input_size,
"activation": self.activation,
"layer_norm_epsilon": self.layer_norm_epsilon,
}
)
return config
@keras_cv_export(
"keras_cv.layers.ViTDetPatchingAndEmbedding", package="keras_cv.layers"
)
class ViTDetPatchingAndEmbedding(keras.layers.Layer):
"""Image to Patch Embedding using only a conv layer (without
layer normalization).
Args:
kernel_size (tuple[int, int], optional): Kernel size of the
projection layer. Defaults to `(16, 16)`.
strides (tuple, optional): Strides of the projection layer.
Defaults to `(16, 16)`.
embed_dim (int, optional): Number of filters to use in the
projection layer i.e. projection size. Defaults to `768`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
- [Detectron2](https://github.com/facebookresearch/detectron2)
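    Example (an illustrative sketch; with a 16x16 kernel and stride, a
    1024x1024 image yields a 64x64 grid of patch embeddings):
    ```python
    patch_embed = ViTDetPatchingAndEmbedding(
        kernel_size=(16, 16), strides=(16, 16), embed_dim=768
    )
    x = np.ones((1, 1024, 1024, 3))
    y = patch_embed(x)  # shape: (1, 64, 64, 768)
    ```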
""" # noqa: E501
def __init__(
self, kernel_size=(16, 16), strides=(16, 16), embed_dim=768, **kwargs
):
super().__init__(**kwargs)
self.projection = keras.layers.Conv2D(
embed_dim, kernel_size=kernel_size, strides=strides
)
self.kernel_size = kernel_size
self.strides = strides
self.embed_dim = embed_dim
def build(self, input_shape):
self.projection.build(input_shape)
self.built = True
def compute_output_shape(self, input_shape):
return self.projection.compute_output_shape(input_shape)
def call(self, x):
x = self.projection(x)
return x
def get_config(self):
config = super().get_config()
config.update(
{
"kernel_size": self.kernel_size,
"strides": self.strides,
"embed_dim": self.embed_dim,
}
)
return config
# TODO: Merge this with the `keras_cv.layers.PatchingAndEmbedding` class once
# it has been ported to Keras Core.
@keras_cv_export(
"keras_cv.layers.AddPositionalEmbedding", package="keras_cv.layers"
)
class AddPositionalEmbedding(keras.layers.Layer):
def __init__(self, img_size, patch_size, embed_dim, **kwargs):
super().__init__(**kwargs)
self.img_size = img_size
self.patch_size = patch_size
self.embed_dim = embed_dim
self.pos_embed = self.add_weight(
name="pos_embed",
shape=(
1,
img_size // patch_size,
img_size // patch_size,
embed_dim,
),
initializer="zeros",
trainable=True,
)
def compute_output_shape(self, input_shape):
return input_shape
def call(self, x):
return x + self.pos_embed
    def get_config(self):
config = super().get_config()
config.update(
{
"img_size": self.img_size,
"patch_size": self.patch_size,
"embed_dim": self.embed_dim,
}
)
return config
| keras-cv/keras_cv/layers/vit_det_layers.py/0 | {
"file_path": "keras-cv/keras_cv/layers/vit_det_layers.py",
"repo_id": "keras-cv",
"token_count": 9997
} | 70 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.losses.BinaryPenaltyReducedFocalCrossEntropy")
class BinaryPenaltyReducedFocalCrossEntropy(keras.losses.Loss):
"""Implements CenterNet modified Focal loss.
    Compared with `keras.losses.BinaryFocalCrossentropy`, this loss discounts
    negative labels whose value is less than `positive_threshold`: the larger
    the value of a negative label, the more its contribution to the final loss
    is discounted.
    Users can choose to divide by the number of keypoints outside of the loss
    computation, or to pass `sample_weight` as 1.0 / num_key_points.
Args:
alpha: a focusing parameter used to compute the focal factor.
Defaults to 2.0. Note, this is equivalent to the `gamma` parameter in
`keras.losses.BinaryFocalCrossentropy`.
beta: a float parameter, penalty exponent for negative labels, defaults to
4.0.
from_logits: Whether `y_pred` is expected to be a logits tensor, defaults
to `False`.
positive_threshold: Anything bigger than this is treated as positive
label, defaults to 0.99.
positive_weight: single scalar weight on positive examples, defaults to
1.0.
negative_weight: single scalar weight on negative examples, defaults to
1.0.
Inputs:
y_true: [batch_size, ...] float tensor
y_pred: [batch_size, ...] float tensor with same shape as y_true.
References:
- [Objects as Points](https://arxiv.org/pdf/1904.07850.pdf) Eq 1.
- [Cornernet: Detecting objects as paired keypoints](https://arxiv.org/abs/1808.01244) for `alpha` and
`beta`.
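    Example (illustrative values only):
    ```python
    loss = keras_cv.losses.BinaryPenaltyReducedFocalCrossEntropy()
    y_true = np.array([[1.0, 0.2, 0.0]])
    y_pred = np.array([[0.9, 0.3, 0.1]])
    loss(y_true, y_pred)
    ```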
""" # noqa: E501
def __init__(
self,
alpha=2.0,
beta=4.0,
from_logits=False,
positive_threshold=0.99,
positive_weight=1.0,
negative_weight=1.0,
reduction="sum_over_batch_size",
name="binary_penalty_reduced_focal_cross_entropy",
):
super().__init__(reduction=reduction, name=name)
self.alpha = alpha
self.beta = beta
self.from_logits = from_logits
self.positive_threshold = positive_threshold
self.positive_weight = positive_weight
self.negative_weight = negative_weight
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if self.from_logits:
y_pred = ops.sigmoid(y_pred)
# TODO(tanzhenyu): Evaluate whether we need clipping after model is
# trained.
y_pred = ops.clip(y_pred, 1e-4, 0.9999)
y_true = ops.clip(y_true, 0.0, 1.0)
pos_loss = ops.power(1.0 - y_pred, self.alpha) * ops.log(y_pred)
neg_loss = (
ops.power(1.0 - y_true, self.beta)
* ops.power(y_pred, self.alpha)
* ops.log(1.0 - y_pred)
)
positive_mask = y_true > self.positive_threshold
loss = ops.where(
positive_mask,
self.positive_weight * pos_loss,
self.negative_weight * neg_loss,
)
return -1.0 * loss
def get_config(self):
config = super().get_config()
config.update(
{
"alpha": self.alpha,
"beta": self.beta,
"from_logits": self.from_logits,
"positive_threshold": self.positive_threshold,
"positive_weight": self.positive_weight,
"negative_weight": self.negative_weight,
}
)
return config
| keras-cv/keras_cv/losses/penalty_reduced_focal_loss.py/0 | {
"file_path": "keras-cv/keras_cv/losses/penalty_reduced_focal_loss.py",
"repo_id": "keras-cv",
"token_count": 1767
} | 71 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for Backbone models."""
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.utils.preset_utils import check_preset_class
from keras_cv.utils.preset_utils import load_from_preset
from keras_cv.utils.python_utils import classproperty
from keras_cv.utils.python_utils import format_docstring
@keras_cv_export("keras_cv.models.Backbone")
class Backbone(keras.Model):
"""Base class for Backbone models.
    Backbones are reusable layers from models trained on a standard task, such
    as ImageNet classification, that can be reused in other tasks.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._pyramid_level_inputs = {}
self._functional_layer_ids = set(
id(layer) for layer in self._flatten_layers()
)
def __dir__(self):
# Temporary fixes for weight saving. This mimics the following PR for
# older version of Keras: https://github.com/keras-team/keras/pull/18982
def filter_fn(attr):
try:
return id(getattr(self, attr)) not in self._functional_layer_ids
except:
return True
return filter(filter_fn, super().__dir__())
def get_config(self):
# Don't chain to super here. The default `get_config()` for functional
# models is nested and cannot be passed to our Backbone constructors.
return {
"name": self.name,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
# The default `from_config()` for functional models will return a
# vanilla `keras.Model`. We override it to get a subclass instance back.
return cls(**config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configs."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configs that include weights."""
return {}
@classproperty
def presets_without_weights(cls):
"""Dictionary of preset names and configs that don't include weights."""
return {
preset: cls.presets[preset]
for preset in set(cls.presets) - set(cls.presets_with_weights)
}
@classmethod
def from_preset(
cls,
preset,
load_weights=None,
**kwargs,
):
"""Instantiate {{model_name}} model from preset config and weights.
Args:
preset: string. Must be one of "{{preset_names}}".
If looking for a preset with pretrained weights, choose one of
"{{preset_with_weights_names}}".
load_weights: Whether to load pre-trained weights into model.
Defaults to `None`, which follows whether the preset has
pretrained weights available.
Examples:
```python
# Load architecture and weights from preset
model = keras_cv.models.{{model_name}}.from_preset(
"{{example_preset_name}}",
)
        # Load randomly initialized model from preset architecture without weights
        model = keras_cv.models.{{model_name}}.from_preset(
            "{{example_preset_name}}",
            load_weights=False,
        )
        ```
"""
        # We support short IDs for official presets (the keys of
        # `cls.presets`). Map these to a Kaggle Models handle.
if preset in cls.presets:
preset = cls.presets[preset]["kaggle_handle"]
check_preset_class(preset, cls)
return load_from_preset(
preset,
load_weights=load_weights,
config_overrides=kwargs,
)
def __init_subclass__(cls, **kwargs):
# Use __init_subclass__ to set up a correct docstring for from_preset.
super().__init_subclass__(**kwargs)
# If the subclass does not define from_preset, assign a wrapper so that
# each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:
def from_preset(calling_cls, *args, **kwargs):
return super(cls, calling_cls).from_preset(*args, **kwargs)
cls.from_preset = classmethod(from_preset)
if not cls.presets:
cls.from_preset.__func__.__doc__ = """Not implemented.
No presets available for this class.
"""
# Format and assign the docstring unless the subclass has overridden it.
if cls.from_preset.__doc__ is None:
cls.from_preset.__func__.__doc__ = Backbone.from_preset.__doc__
format_docstring(
model_name=cls.__name__,
example_preset_name=next(iter(cls.presets_with_weights), ""),
preset_names='", "'.join(cls.presets),
preset_with_weights_names='", "'.join(cls.presets_with_weights),
)(cls.from_preset.__func__)
@property
def pyramid_level_inputs(self):
"""Intermediate model outputs for feature extraction.
Format is a dictionary with string as key and layer name as value.
The string key represents the level of the feature output. A typical
feature pyramid has five levels corresponding to scales "P3", "P4",
"P5", "P6", "P7" in the backbone. Scale Pn represents a feature map 2^n
times smaller in width and height than the input image.
Example:
```python
{
'P3': 'v2_stack_1_block4_out',
'P4': 'v2_stack_2_block6_out',
'P5': 'v2_stack_3_block3_out',
}
```
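        A sketch of how these outputs can be used to build a multi-level
        feature extractor (the backbone class and layer names here are just
        for illustration):
        ```python
        backbone = keras_cv.models.ResNet50V2Backbone()
        extractor = keras.Model(
            inputs=backbone.inputs,
            outputs={
                level: backbone.get_layer(name).output
                for level, name in backbone.pyramid_level_inputs.items()
            },
        )
        ```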
"""
return self._pyramid_level_inputs
@pyramid_level_inputs.setter
def pyramid_level_inputs(self, value):
self._pyramid_level_inputs = value
| keras-cv/keras_cv/models/backbones/backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/backbone.py",
"repo_id": "keras-cv",
"token_count": 2673
} | 72 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_backbone import ( # noqa: E501
EfficientNetLiteBackbone,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Usage:
```python
input_data = np.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = {name}Backbone()
output = model(input_data)
```
""" # noqa: E501
class EfficientNetLiteB0Backbone(EfficientNetLiteBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b0", **kwargs
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetLiteB1Backbone(EfficientNetLiteBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b1", **kwargs
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetLiteB2Backbone(EfficientNetLiteBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b2", **kwargs
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetLiteB3Backbone(EfficientNetLiteBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b3", **kwargs
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetLiteB4Backbone(EfficientNetLiteBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b4", **kwargs
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(
EfficientNetLiteB0Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetLiteB0"),
)
setattr(
EfficientNetLiteB1Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetLiteB1"),
)
setattr(
EfficientNetLiteB2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetLiteB2"),
)
setattr(
EfficientNetLiteB3Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetLiteB3"),
)
setattr(
EfficientNetLiteB4Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetLiteB4"),
)
| keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_aliases.py",
"repo_id": "keras-cv",
"token_count": 2848
} | 73 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2SBackbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone import (
EfficientNetV2Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class EfficientNetV2BackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(8, 224, 224, 3))
def test_valid_call(self):
model = EfficientNetV2Backbone(
stackwise_kernel_sizes=[3, 3, 3, 3, 3, 3],
stackwise_num_repeats=[2, 4, 4, 6, 9, 15],
stackwise_input_filters=[24, 24, 48, 64, 128, 160],
stackwise_output_filters=[24, 48, 64, 128, 160, 256],
stackwise_expansion_ratios=[1, 4, 4, 4, 6, 6],
stackwise_squeeze_and_excite_ratios=[0.0, 0.0, 0, 0.25, 0.25, 0.25],
stackwise_strides=[1, 2, 2, 2, 1, 2],
stackwise_conv_types=[
"fused",
"fused",
"fused",
"unfused",
"unfused",
"unfused",
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_alias_model_with_rescaling(self):
model = EfficientNetV2SBackbone(include_rescaling=True)
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = EfficientNetV2Backbone(
stackwise_kernel_sizes=[3, 3, 3, 3, 3, 3],
stackwise_num_repeats=[2, 4, 4, 6, 9, 15],
stackwise_input_filters=[24, 24, 48, 64, 128, 160],
stackwise_output_filters=[24, 48, 64, 128, 160, 256],
stackwise_expansion_ratios=[1, 4, 4, 4, 6, 6],
stackwise_squeeze_and_excite_ratios=[0.0, 0.0, 0, 0.25, 0.25, 0.25],
stackwise_strides=[1, 2, 2, 2, 1, 2],
stackwise_conv_types=[
"fused",
"fused",
"fused",
"unfused",
"unfused",
"unfused",
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = EfficientNetV2Backbone(
stackwise_kernel_sizes=[3, 3, 3, 3, 3, 3],
stackwise_num_repeats=[2, 4, 4, 6, 9, 15],
stackwise_input_filters=[24, 24, 48, 64, 128, 160],
stackwise_output_filters=[24, 48, 64, 128, 160, 256],
stackwise_expansion_ratios=[1, 4, 4, 4, 6, 6],
stackwise_squeeze_and_excite_ratios=[0.0, 0.0, 0, 0.25, 0.25, 0.25],
stackwise_strides=[1, 2, 2, 2, 1, 2],
stackwise_conv_types=[
"fused",
"fused",
"fused",
"unfused",
"unfused",
"unfused",
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=True,
)
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "efficientnet_v2_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, EfficientNetV2Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = EfficientNetV2SBackbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "efficientnet_v2_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
# Note that these aliases serialized as the base class
self.assertIsInstance(restored_model, EfficientNetV2Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = EfficientNetV2SBackbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P1", "P2", "P3", "P4", "P5"]
        self.assertEqual(list(outputs.keys()), levels)
        self.assertEqual(
            outputs["P1"].shape,
            (None, input_size // 2**1, input_size // 2**1, 24),
        )
        self.assertEqual(
            outputs["P2"].shape,
            (None, input_size // 2**2, input_size // 2**2, 48),
        )
        self.assertEqual(
            outputs["P3"].shape,
            (None, input_size // 2**3, input_size // 2**3, 64),
        )
        self.assertEqual(
            outputs["P4"].shape,
            (None, input_size // 2**4, input_size // 2**4, 160),
        )
        self.assertEqual(
            outputs["P5"].shape,
            (None, input_size // 2**5, input_size // 2**5, 1280),
        )
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
model = EfficientNetV2Backbone(
stackwise_kernel_sizes=[3, 3, 3, 3, 3, 3],
stackwise_num_repeats=[2, 4, 4, 6, 9, 15],
stackwise_input_filters=[24, 24, 48, 64, 128, 160],
stackwise_output_filters=[24, 48, 64, 128, 160, 256],
stackwise_expansion_ratios=[1, 4, 4, 4, 6, 6],
stackwise_squeeze_and_excite_ratios=[0.0, 0.0, 0, 0.25, 0.25, 0.25],
stackwise_strides=[1, 2, 2, 2, 1, 2],
stackwise_conv_types=[
"fused",
"fused",
"fused",
"unfused",
"unfused",
"unfused",
],
width_coefficient=1.0,
depth_coefficient=1.0,
            include_rescaling=True,
            input_shape=(None, None, num_channels),
        )
self.assertEqual(model.output_shape, (None, None, None, 1280))
| keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 3821
} | 74 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNetV1 model preset configurations."""
backbone_presets_no_weights = {
"resnet18": {
"metadata": {
"description": (
"ResNet model with 18 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style)."
),
"params": 11186112,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet18/2",
},
"resnet34": {
"metadata": {
"description": (
"ResNet model with 34 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style)."
),
"params": 21301696,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet34/2",
},
"resnet50": {
"metadata": {
"description": (
"ResNet model with 50 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style)."
),
"params": 23561152,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet50/2",
},
"resnet101": {
"metadata": {
"description": (
"ResNet model with 101 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style)."
),
"params": 42605504,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet101/2",
},
"resnet152": {
"metadata": {
"description": (
"ResNet model with 152 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style)."
),
"params": 58295232,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet152/2",
},
}
backbone_presets_with_weights = {
"resnet50_imagenet": {
"metadata": {
"description": (
"ResNet model with 50 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style). "
"Trained on Imagenet 2012 classification task."
),
"params": 23561152,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet50_imagenet/2",
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
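# Minimal inspection sketch: these dictionaries are plain metadata consumed by
# the preset machinery (e.g. a `from_preset` constructor elsewhere in
# KerasCV); nothing here loads weights. The loop below only reads the tables.
if __name__ == "__main__":
    for name, preset in backbone_presets.items():
        has_weights = name in backbone_presets_with_weights
        params = preset["metadata"]["params"]
        print(f"{name}: params={params}, pretrained_weights={has_weights}")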
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1743
} | 75 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetBBackbone
from keras_cv.tests.test_case import TestCase
class TestViTDetBackbone(TestCase):
@pytest.mark.large
def test_call(self):
model = ViTDetBBackbone()
x = np.ones((1, 1024, 1024, 3))
x_out = ops.convert_to_numpy(model(x))
num_parameters = sum(
np.prod(tuple(x.shape)) for x in model.trainable_variables
)
self.assertEqual(x_out.shape, (1, 64, 64, 256))
self.assertEqual(num_parameters, 89_670_912)
@pytest.mark.extra_large
    def test_save(self):
# saving test
model = ViTDetBBackbone()
x = np.ones((1, 1024, 1024, 3))
x_out = ops.convert_to_numpy(model(x))
path = os.path.join(self.get_temp_dir(), "model.keras")
model.save(path)
loaded_model = keras.saving.load_model(path)
x_out_loaded = ops.convert_to_numpy(loaded_model(x))
self.assertAllClose(x_out, x_out_loaded)
@pytest.mark.extra_large
def test_fit(self):
model = ViTDetBBackbone()
x = np.ones((1, 1024, 1024, 3))
y = np.zeros((1, 64, 64, 256))
model.compile(optimizer="adam", loss="mse", metrics=["mse"])
model.fit(x, y, epochs=1)
def test_pyramid_level_inputs_error(self):
model = ViTDetBBackbone()
with self.assertRaises(NotImplementedError, msg="doesn't compute"):
model.pyramid_level_inputs
| keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 895
} | 76 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.models.legacy.convmixer import ConvMixer_512_16
from keras_cv.models.legacy.convmixer import ConvMixer_768_32
from keras_cv.models.legacy.convmixer import ConvMixer_1024_16
from keras_cv.models.legacy.convmixer import ConvMixer_1536_20
from keras_cv.models.legacy.convmixer import ConvMixer_1536_24
from keras_cv.models.legacy.convnext import ConvNeXtBase
from keras_cv.models.legacy.convnext import ConvNeXtLarge
from keras_cv.models.legacy.convnext import ConvNeXtSmall
from keras_cv.models.legacy.convnext import ConvNeXtTiny
from keras_cv.models.legacy.convnext import ConvNeXtXLarge
from keras_cv.models.legacy.darknet import DarkNet21
from keras_cv.models.legacy.darknet import DarkNet53
from keras_cv.models.legacy.mlp_mixer import MLPMixerB16
from keras_cv.models.legacy.mlp_mixer import MLPMixerB32
from keras_cv.models.legacy.mlp_mixer import MLPMixerL16
from keras_cv.models.legacy.object_detection.faster_rcnn.faster_rcnn import (
FasterRCNN,
)
from keras_cv.models.legacy.regnet import RegNetX002
from keras_cv.models.legacy.regnet import RegNetX004
from keras_cv.models.legacy.regnet import RegNetX006
from keras_cv.models.legacy.regnet import RegNetX008
from keras_cv.models.legacy.regnet import RegNetX016
from keras_cv.models.legacy.regnet import RegNetX032
from keras_cv.models.legacy.regnet import RegNetX040
from keras_cv.models.legacy.regnet import RegNetX064
from keras_cv.models.legacy.regnet import RegNetX080
from keras_cv.models.legacy.regnet import RegNetX120
from keras_cv.models.legacy.regnet import RegNetX160
from keras_cv.models.legacy.regnet import RegNetX320
from keras_cv.models.legacy.regnet import RegNetY002
from keras_cv.models.legacy.regnet import RegNetY004
from keras_cv.models.legacy.regnet import RegNetY006
from keras_cv.models.legacy.regnet import RegNetY008
from keras_cv.models.legacy.regnet import RegNetY016
from keras_cv.models.legacy.regnet import RegNetY032
from keras_cv.models.legacy.regnet import RegNetY040
from keras_cv.models.legacy.regnet import RegNetY064
from keras_cv.models.legacy.regnet import RegNetY080
from keras_cv.models.legacy.regnet import RegNetY120
from keras_cv.models.legacy.regnet import RegNetY160
from keras_cv.models.legacy.regnet import RegNetY320
from keras_cv.models.legacy.vgg16 import VGG16
from keras_cv.models.legacy.vgg19 import VGG19
from keras_cv.models.legacy.vit import ViTB16
from keras_cv.models.legacy.vit import ViTB32
from keras_cv.models.legacy.vit import ViTH16
from keras_cv.models.legacy.vit import ViTH32
from keras_cv.models.legacy.vit import ViTL16
from keras_cv.models.legacy.vit import ViTL32
from keras_cv.models.legacy.vit import ViTS16
from keras_cv.models.legacy.vit import ViTS32
from keras_cv.models.legacy.vit import ViTTiny16
from keras_cv.models.legacy.vit import ViTTiny32
| keras-cv/keras_cv/models/legacy/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/__init__.py",
"repo_id": "keras-cv",
"token_count": 1203
} | 77 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
@keras_cv_export("keras_cv.models.retinanet.PredictionHead")
class PredictionHead(keras.layers.Layer):
"""The class/box predictions head.
Arguments:
output_filters: Number of convolution filters in the final layer.
bias_initializer: Bias Initializer for the final convolution layer.
    Returns:
        A layer representing either the classification
        or the box regression head, depending on `output_filters`.
"""
def __init__(
self, output_filters, bias_initializer, num_conv_layers=3, **kwargs
):
super().__init__(**kwargs)
self.output_filters = output_filters
self.bias_initializer = bias_initializer
self.num_conv_layers = num_conv_layers
self.conv_layers = [
keras.layers.Conv2D(
256,
kernel_size=3,
padding="same",
kernel_initializer="orthogonal",
activation="relu",
)
for _ in range(num_conv_layers)
]
self.prediction_layer = keras.layers.Conv2D(
self.output_filters,
kernel_size=3,
strides=1,
padding="same",
kernel_initializer="orthogonal",
bias_initializer=self.bias_initializer,
)
def call(self, x, training=False):
for layer in self.conv_layers:
x = layer(x, training=training)
x = self.prediction_layer(x, training=training)
return x
def compute_output_shape(self, input_shape):
return tuple(input_shape[:-1]) + (self.output_filters,)
def get_config(self):
config = {
"bias_initializer": keras.initializers.serialize(
self.bias_initializer
),
"output_filters": self.output_filters,
"num_conv_layers": self.num_conv_layers,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
config.update(
{
"bias_initializer": keras.initializers.deserialize(
config["bias_initializer"]
)
}
)
return super().from_config(config)
def build(self, input_shape):
self.conv_layers[0].build(input_shape)
intermediate_shape = tuple(input_shape[:-1]) + (256,)
for conv_layer in self.conv_layers[1:]:
conv_layer.build(intermediate_shape)
self.prediction_layer.build(intermediate_shape)
self.built = True
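# Illustrative usage sketch (not part of the library): a RetinaNet-style
# classification head on a single feature map. The prior probability of 0.01
# used for the bias initializer and the 9 anchors x 20 classes output size
# are assumptions made only for this example.
if __name__ == "__main__":
    import math
    import numpy as np
    prior_bias = keras.initializers.Constant(-math.log((1 - 0.01) / 0.01))
    head = PredictionHead(output_filters=9 * 20, bias_initializer=prior_bias)
    features = np.ones((2, 32, 32, 256), dtype="float32")
    print(head(features).shape)  # Expected: (2, 32, 32, 180)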
| keras-cv/keras_cv/models/object_detection/retinanet/prediction_head.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/retinanet/prediction_head.py",
"repo_id": "keras-cv",
"token_count": 1424
} | 78 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone_presets import backbone_presets
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
apply_basic_block as resnet_basic_block,
)
from keras_cv.models.segmentation.basnet.basnet_presets import basnet_presets
from keras_cv.models.segmentation.basnet.basnet_presets import (
presets_no_weights,
)
from keras_cv.models.segmentation.basnet.basnet_presets import (
presets_with_weights,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
@keras_cv_export(
[
"keras_cv.models.BASNet",
"keras_cv.models.segmentation.BASNet",
]
)
class BASNet(Task):
"""
A Keras model implementing the BASNet architecture for semantic
segmentation.
References:
- [BASNet: Boundary-Aware Segmentation Network for Mobile and Web Applications](https://arxiv.org/abs/2101.04704)
Args:
backbone: `keras.Model`. The backbone network for the model that is
used as a feature extractor for BASNet prediction encoder. Currently
supported backbones are ResNet18 and ResNet34. Default backbone is
`keras_cv.models.ResNet34Backbone()`
(Note: Do not specify 'input_shape', 'input_tensor', or 'include_rescaling'
within the backbone. Please provide these while initializing the
'BASNet' model.)
num_classes: int, the number of classes for the segmentation model.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
projection_filters: int, number of filters in the convolution layer
projecting low-level features from the `backbone`.
prediction_heads: (Optional) List of `keras.layers.Layer` defining
the prediction module head for the model. If not provided, a
default head is created with a Conv2D layer followed by resizing.
refinement_head: (Optional) a `keras.layers.Layer` defining the
refinement module head for the model. If not provided, a default
head is created with a Conv2D layer.
Examples:
```python
import keras_cv
images = np.ones(shape=(1, 288, 288, 3))
labels = np.zeros(shape=(1, 288, 288, 1))
# Note: Do not specify 'input_shape', 'input_tensor', or
# 'include_rescaling' within the backbone.
backbone = keras_cv.models.ResNet34Backbone()
model = keras_cv.models.segmentation.BASNet(
backbone=backbone,
num_classes=1,
input_shape=[288, 288, 3],
include_rescaling=False
)
# Evaluate model
output = model(images)
pred_labels = output[0]
# Train model
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(from_logits=False),
metrics=["accuracy"],
)
model.fit(images, labels, epochs=3)
```
""" # noqa: E501
def __init__(
self,
backbone,
num_classes,
input_shape=(None, None, 3),
input_tensor=None,
include_rescaling=False,
projection_filters=64,
prediction_heads=None,
refinement_head=None,
**kwargs,
):
        if not isinstance(backbone, keras.Model):
raise ValueError(
"Argument `backbone` must be a `keras.layers.Layer` instance"
f" or `keras.Model`. Received instead"
f" backbone={backbone} (of type {type(backbone)})."
)
if backbone.input_shape != (None, None, None, 3):
raise ValueError(
"Do not specify 'input_shape' or 'input_tensor' within the"
" 'BASNet' backbone. \nPlease provide 'input_shape' or"
" 'input_tensor' while initializing the 'BASNet' model."
)
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(1 / 255.0)(x)
if prediction_heads is None:
prediction_heads = []
for size in (1, 2, 4, 8, 16, 32, 32):
head_layers = [
keras.layers.Conv2D(
num_classes, kernel_size=(3, 3), padding="same"
)
]
if size != 1:
head_layers.append(
keras.layers.UpSampling2D(
size=size, interpolation="bilinear"
)
)
prediction_heads.append(keras.Sequential(head_layers))
if refinement_head is None:
refinement_head = keras.Sequential(
[
keras.layers.Conv2D(
num_classes, kernel_size=(3, 3), padding="same"
),
]
)
# Prediction model.
predict_model = basnet_predict(
x, backbone, projection_filters, prediction_heads
)
# Refinement model.
refine_model = basnet_rrm(
predict_model, projection_filters, refinement_head
)
outputs = refine_model.outputs # Combine outputs.
outputs.extend(predict_model.outputs)
outputs = [
keras.layers.Activation("sigmoid", dtype="float32")(_)
for _ in outputs
] # Activations.
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
self.backbone = backbone
self.num_classes = num_classes
self.input_tensor = input_tensor
self.include_rescaling = include_rescaling
self.projection_filters = projection_filters
self.prediction_heads = prediction_heads
self.refinement_head = refinement_head
def get_config(self):
return {
"backbone": keras.saving.serialize_keras_object(self.backbone),
"num_classes": self.num_classes,
"input_shape": self.input_shape[1:],
"input_tensor": keras.saving.serialize_keras_object(
self.input_tensor
),
"include_rescaling": self.include_rescaling,
"projection_filters": self.projection_filters,
"prediction_heads": [
keras.saving.serialize_keras_object(prediction_head)
for prediction_head in self.prediction_heads
],
"refinement_head": keras.saving.serialize_keras_object(
self.refinement_head
),
}
@classmethod
def from_config(cls, config):
if "backbone" in config and isinstance(config["backbone"], dict):
input_shape = (None, None, 3)
if isinstance(config["backbone"]["config"]["input_shape"], list):
input_shape = list(input_shape)
if config["backbone"]["config"]["input_shape"] != input_shape:
config["input_shape"] = config["backbone"]["config"][
"input_shape"
]
config["backbone"]["config"]["input_shape"] = input_shape
config["backbone"] = keras.layers.deserialize(config["backbone"])
if "input_tensor" in config and isinstance(
config["input_tensor"], dict
):
config["input_tensor"] = keras.layers.deserialize(
config["input_tensor"]
)
if "prediction_heads" in config and isinstance(
config["prediction_heads"], list
):
for i in range(len(config["prediction_heads"])):
if isinstance(config["prediction_heads"][i], dict):
config["prediction_heads"][i] = keras.layers.deserialize(
config["prediction_heads"][i]
)
if "refinement_head" in config and isinstance(
config["refinement_head"], dict
):
config["refinement_head"] = keras.layers.deserialize(
config["refinement_head"]
)
return super().from_config(config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
filtered_backbone_presets = copy.deepcopy(
{
k: v
for k, v in backbone_presets.items()
if k in ("resnet18", "resnet34")
}
)
return copy.deepcopy({**filtered_backbone_presets, **basnet_presets})
@classproperty
def presets_with_weights(cls):
"""
Dictionary of preset names and configurations that include weights.
"""
return copy.deepcopy(presets_with_weights)
@classproperty
def presets_without_weights(cls):
"""
Dictionary of preset names and configurations that has no weights.
"""
return copy.deepcopy(presets_no_weights)
@classproperty
def backbone_presets(cls):
"""
Dictionary of preset names and configurations of compatible backbones.
"""
filtered_backbone_presets = copy.deepcopy(
{
k: v
for k, v in backbone_presets.items()
if k in ("resnet18", "resnet34")
}
)
filtered_presets = copy.deepcopy(filtered_backbone_presets)
return filtered_presets
def convolution_block(x_input, filters, dilation=1):
"""
Apply convolution + batch normalization + ReLU activation.
Args:
x_input: Input keras tensor.
filters: int, number of output filters in the convolution.
dilation: int, dilation rate for the convolution operation.
Defaults to 1.
Returns:
A tensor with convolution, batch normalization, and ReLU
activation applied.
"""
x = keras.layers.Conv2D(
filters, (3, 3), padding="same", dilation_rate=dilation
)(x_input)
x = keras.layers.BatchNormalization()(x)
return keras.layers.Activation("relu")(x)
def get_resnet_block(_resnet, block_num):
"""
Extract and return a specific ResNet block.
Args:
_resnet: `keras.Model`. ResNet model instance.
block_num: int, block number to extract.
Returns:
A Keras Model representing the specified ResNet block.
"""
extractor_levels = ["P2", "P3", "P4", "P5"]
return keras.models.Model(
inputs=_resnet.get_layer(f"v2_stack_{block_num}_block1_1_conv").input,
outputs=_resnet.get_layer(
_resnet.pyramid_level_inputs[extractor_levels[block_num]]
).output,
name=f"resnet_block{block_num + 1}",
)
def basnet_predict(x_input, backbone, filters, segmentation_heads):
"""
BASNet Prediction Module.
This module outputs a coarse label map by integrating heavy
encoder, bridge, and decoder blocks.
Args:
x_input: Input keras tensor.
backbone: `keras.Model`. The backbone network used as a feature
extractor for BASNet prediction encoder.
filters: int, the number of filters.
segmentation_heads: List of `keras.layers.Layer`, A list of Keras
layers serving as the segmentation head for prediction module.
Returns:
A Keras Model that integrates the encoder, bridge, and decoder
blocks for coarse label map prediction.
"""
num_stages = 6
x = x_input
# -------------Encoder--------------
x = keras.layers.Conv2D(filters, kernel_size=(3, 3), padding="same")(x)
encoder_blocks = []
for i in range(num_stages):
if i < 4: # First four stages are adopted from ResNet backbone.
x = get_resnet_block(backbone, i)(x)
encoder_blocks.append(x)
else: # Last 2 stages consist of three basic resnet blocks.
x = keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x)
for j in range(3):
x = resnet_basic_block(
x,
filters=x.shape[3],
conv_shortcut=False,
name=f"v1_basic_block_{i + 1}_{j + 1}",
)
encoder_blocks.append(x)
# -------------Bridge-------------
x = convolution_block(x, filters=filters * 8, dilation=2)
x = convolution_block(x, filters=filters * 8, dilation=2)
x = convolution_block(x, filters=filters * 8, dilation=2)
encoder_blocks.append(x)
# -------------Decoder-------------
decoder_blocks = []
for i in reversed(range(num_stages)):
if i != (num_stages - 1): # Except first, scale other decoder stages.
x = keras.layers.UpSampling2D(size=2, interpolation="bilinear")(x)
x = keras.layers.concatenate([encoder_blocks[i], x], axis=-1)
x = convolution_block(x, filters=filters * 8)
x = convolution_block(x, filters=filters * 8)
x = convolution_block(x, filters=filters * 8)
decoder_blocks.append(x)
decoder_blocks.reverse() # Change order from last to first decoder stage.
decoder_blocks.append(encoder_blocks[-1]) # Copy bridge to decoder.
# -------------Side Outputs--------------
decoder_blocks = [
segmentation_head(decoder_block) # Prediction segmentation head.
for segmentation_head, decoder_block in zip(
segmentation_heads, decoder_blocks
)
]
return keras.models.Model(inputs=[x_input], outputs=decoder_blocks)
def basnet_rrm(base_model, filters, segmentation_head):
"""
BASNet Residual Refinement Module (RRM).
This module outputs a fine label map by integrating light encoder,
bridge, and decoder blocks.
Args:
base_model: Keras model used as the base or coarse label map.
filters: int, the number of filters.
segmentation_head: a `keras.layers.Layer`, A Keras layer serving
as the segmentation head for refinement module.
Returns:
A Keras Model that constructs the Residual Refinement Module (RRM).
"""
num_stages = 4
x_input = base_model.output[0]
# -------------Encoder--------------
x = keras.layers.Conv2D(filters, kernel_size=(3, 3), padding="same")(
x_input
)
encoder_blocks = []
for _ in range(num_stages):
x = convolution_block(x, filters=filters)
encoder_blocks.append(x)
x = keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x)
# -------------Bridge--------------
x = convolution_block(x, filters=filters)
# -------------Decoder--------------
for i in reversed(range(num_stages)):
x = keras.layers.UpSampling2D(size=2, interpolation="bilinear")(x)
x = keras.layers.concatenate([encoder_blocks[i], x], axis=-1)
x = convolution_block(x, filters=filters)
x = segmentation_head(x) # Refinement segmentation head.
# ------------- refined = coarse + residual
x = keras.layers.Add()([x_input, x]) # Add prediction + refinement output
return keras.models.Model(inputs=base_model.input, outputs=[x])
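# Illustrative sketch of the output structure (assumes the ResNet34 preset
# configuration can be resolved, as in the class docstring example). The
# model should return the refined map first, followed by the seven coarse
# side outputs of the prediction module, all passed through a sigmoid.
if __name__ == "__main__":
    import numpy as np
    from keras_cv.models import ResNet34Backbone
    model = BASNet(
        backbone=ResNet34Backbone(),
        num_classes=1,
        input_shape=[288, 288, 3],
    )
    outputs = model(np.ones((1, 288, 288, 3)))
    print(len(outputs))  # Expected: 8 (1 refined map + 7 side outputs).
    print(outputs[0].shape)  # The refined map, expected (1, 288, 288, 1).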
| keras-cv/keras_cv/models/segmentation/basnet/basnet.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/basnet/basnet.py",
"repo_id": "keras-cv",
"token_count": 7091
} | 79 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.vit_det_layers import MLP
@keras_cv_export(
"keras_cv.layers.MultiHeadAttentionWithDownsampling",
package="keras_cv.layers",
)
class MultiHeadAttentionWithDownsampling(keras.layers.Layer):
"""Multi-Head Attention with downsampling.
An attention layer that allows for downscaling the size of the embedding
after projection to queries, keys, and values.
This layer first downscales the features of input queries, keys, and
values using a dense layer. Multi-head attention is then performed
and the attention map is projected back (upscaled) to the number of
input features.
Args:
num_heads (int): Number of attention heads.
key_dim (int): Size of each attention head for query, key, and
value.
downsample_rate (int, optional): The factor by which to downscale the
input features i.e. the input features of size `key_dim` are
projected down to `key_dim // downsample_rate`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
""" # noqa: E501
def __init__(self, num_heads, key_dim, downsample_rate=1, **kwargs):
super().__init__(**kwargs)
self.num_heads = num_heads
self.key_dim = key_dim
self.downsample_rate = downsample_rate
self.internal_dims = key_dim // downsample_rate
# Downsample
self.query_proj = keras.layers.Dense(
self.internal_dims * self.num_heads
)
self.key_proj = keras.layers.Dense(self.internal_dims * self.num_heads)
self.value_proj = keras.layers.Dense(
self.internal_dims * self.num_heads
)
# Upsample
self.out_proj = keras.layers.Dense(self.key_dim * self.num_heads)
def build(self, input_shape=None):
self.query_proj.build([None, None, self.num_heads * self.key_dim])
self.key_proj.build([None, None, self.num_heads * self.key_dim])
self.value_proj.build([None, None, self.num_heads * self.key_dim])
self.out_proj.build([None, None, self.internal_dims * self.num_heads])
self.built = True
def __separate_heads(self, x):
shape = ops.shape(x)
B, N, C = shape[0], shape[1], shape[2]
x = ops.reshape(x, (B, N, self.num_heads, C // self.num_heads))
return ops.transpose(x, axes=(0, 2, 1, 3))
def __recombine_heads(self, x):
shape = ops.shape(x)
B, N_H, N_T, C_PH = shape[0], shape[1], shape[2], shape[3]
x = ops.transpose(x, axes=(0, 2, 1, 3))
return ops.reshape(x, (B, N_T, N_H * C_PH))
def call(self, query, value, key):
query = self.query_proj(query)
key = self.key_proj(key)
value = self.value_proj(value)
# Separate into heads
query = self.__separate_heads(query)
key = self.__separate_heads(key)
value = self.__separate_heads(value)
# Attention
C_PH = ops.shape(query)[-1]
out = query @ ops.transpose(key, (0, 1, 3, 2))
out = out / ops.sqrt(ops.cast(C_PH, dtype=self.compute_dtype))
out = ops.softmax(out, axis=-1)
# Get output
attention_map = out @ value
attention_map = self.__recombine_heads(attention_map)
return self.out_proj(attention_map)
def get_config(self):
config = super().get_config()
config.update(
{
"num_heads": self.num_heads,
"key_dim": self.key_dim,
"downsample_rate": self.downsample_rate,
}
)
return config
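def _downsampled_attention_example():
    """Illustrative sketch (not used by the library): the layer consumes and
    returns embeddings of size `num_heads * key_dim` (256 here) while the
    internal attention runs at `key_dim // downsample_rate` dims per head.
    The batch and token counts are arbitrary choices for this example."""
    layer = MultiHeadAttentionWithDownsampling(
        num_heads=8, key_dim=32, downsample_rate=2
    )
    tokens = ops.ones((1, 10, 8 * 32))
    image_features = ops.ones((1, 64 * 64, 8 * 32))
    out = layer(query=tokens, value=image_features, key=image_features)
    return ops.shape(out)  # Expected: (1, 10, 256)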
@keras_cv_export(
"keras_cv.layers.TwoWayMultiHeadAttention", package="keras_cv.layers"
)
class TwoWayMultiHeadAttention(keras.layers.Layer):
"""Two-way multi-head attention layer.
Args:
num_heads (int): Number of attention heads.
key_dim (int): Size of each attention head for query, key, and
value.
mlp_dim (int): Number of hidden dims to use in the mlp block.
skip_first_layer_pe (bool): A boolean indicating whether to skip the
first layer positional embeddings.
attention_downsample_rate (int, optional): The downsample rate to use
in the attention layers. Defaults to 2.
activation (str, optional): The activation for the mlp block's output
layer. Defaults to "relu".
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
""" # noqa: E501
def __init__(
self,
num_heads,
key_dim,
mlp_dim,
skip_first_layer_pe,
attention_downsample_rate=2,
activation="relu",
**kwargs,
):
super().__init__(**kwargs)
self.num_heads = num_heads
self.key_dim = key_dim
self.mlp_dim = mlp_dim
self.skip_first_layer_pe = skip_first_layer_pe
self.attention_downsample_rate = attention_downsample_rate
self.activation = activation
self.self_attention = MultiHeadAttentionWithDownsampling(
num_heads=num_heads, key_dim=key_dim
)
self.layer_norm1 = keras.layers.LayerNormalization(epsilon=1e-5)
self.cross_attention_token_to_image = (
MultiHeadAttentionWithDownsampling(
num_heads=num_heads,
key_dim=key_dim,
downsample_rate=attention_downsample_rate,
)
)
self.layer_norm2 = keras.layers.LayerNormalization(epsilon=1e-5)
self.mlp_block = MLP(
mlp_dim,
key_dim * num_heads,
num_layers=2,
activation=activation,
)
self.layer_norm3 = keras.layers.LayerNormalization(epsilon=1e-5)
self.cross_attention_image_to_token = (
MultiHeadAttentionWithDownsampling(
num_heads=num_heads,
key_dim=key_dim,
downsample_rate=attention_downsample_rate,
)
)
self.layer_norm4 = keras.layers.LayerNormalization(epsilon=1e-5)
def build(self, input_shape=None):
self.self_attention.build()
self.layer_norm1.build([None, None, self.num_heads * self.key_dim])
self.cross_attention_token_to_image.build()
self.layer_norm2.build([None, None, self.num_heads * self.key_dim])
self.mlp_block.build([None, None, self.num_heads * self.key_dim])
self.layer_norm3.build([None, None, self.num_heads * self.key_dim])
self.cross_attention_image_to_token.build()
self.layer_norm4.build([None, None, self.num_heads * self.key_dim])
self.built = True
def call(self, queries, keys, query_pe, key_pe):
if self.skip_first_layer_pe:
queries = self.self_attention(
query=queries, value=queries, key=queries
)
else:
queries_with_pe = queries + query_pe
attention_map = self.self_attention(
query=queries_with_pe, key=queries_with_pe, value=queries
)
queries = queries + attention_map
queries = self.layer_norm1(queries)
queries_with_pe = queries + query_pe
keys_with_pe = keys + key_pe
attention_map = self.cross_attention_token_to_image(
query=queries_with_pe, key=keys_with_pe, value=keys
)
queries = queries + attention_map
queries = self.layer_norm2(queries)
mlp_out = self.mlp_block(queries)
queries = queries + mlp_out
queries = self.layer_norm3(queries)
queries_with_pe = queries + query_pe
keys_with_pe = keys + key_pe
attention_map = self.cross_attention_image_to_token(
query=keys_with_pe, key=queries_with_pe, value=queries
)
keys = keys + attention_map
keys = self.layer_norm4(keys)
return queries, keys
def get_config(self):
config = super().get_config()
config.update(
{
"num_heads": self.num_heads,
"key_dim": self.key_dim,
"mlp_dim": self.mlp_dim,
"skip_first_layer_pe": self.skip_first_layer_pe,
"attention_downsample_rate": self.attention_downsample_rate,
"activation": self.activation,
}
)
return config
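def _two_way_attention_example():
    """Illustrative sketch (not used by the library): a single two-way block
    updating both the sparse prompt tokens and the flattened image embedding.
    All sizes below are arbitrary choices for this example."""
    embed_dim, num_heads = 256, 8
    block = TwoWayMultiHeadAttention(
        num_heads=num_heads,
        key_dim=embed_dim // num_heads,
        mlp_dim=2048,
        skip_first_layer_pe=True,
    )
    queries = ops.ones((1, 5, embed_dim))  # Prompt tokens.
    keys = ops.ones((1, 64 * 64, embed_dim))  # Flattened image embedding.
    query_pe = ops.zeros_like(queries)
    key_pe = ops.zeros_like(keys)
    queries, keys = block(queries, keys, query_pe=query_pe, key_pe=key_pe)
    return ops.shape(queries), ops.shape(keys)  # (1, 5, 256), (1, 4096, 256)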
@keras_cv_export(
"keras_cv.layers.RandomFrequencyPositionalEmbeddings",
package="keras_cv.layers",
)
class RandomFrequencyPositionalEmbeddings(keras.layers.Layer):
"""Positional encoding using random spatial frequencies.
This layer maps coordinates/points in 2D space to positional
encodings using random spatial frequencies.
Args:
num_positional_features (int): Number of positional features
in the output.
scale (float): The standard deviation of the random frequencies.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
""" # noqa: E501
def __init__(self, num_positional_features, scale, **kwargs):
super().__init__(**kwargs)
self.num_positional_features = num_positional_features
self.scale = scale
self.positional_encoding_gaussian_matrix = self.add_weight(
name="positional_encoding_gaussian_matrix",
shape=(2, self.num_positional_features),
dtype=self.variable_dtype,
trainable=False,
initializer=keras.initializers.get("normal"),
)
def build(self, input_shape=None):
self.built = True
def __positional_encodings(self, coords):
coords = coords * 2 - 1
coords = coords @ ops.cast(
self.positional_encoding_gaussian_matrix, dtype=self.compute_dtype
)
coords = coords * (2 * math.pi)
return ops.concatenate([ops.sin(coords), ops.cos(coords)], axis=-1)
def call(self, size):
return self.encode_image(size)
def encode_image(self, size):
"""Generate a positional encoding for an image of any given size.
Args:
size (tuple[int, int]): The size of the image.
Returns:
tensor: Positional encoding of the image.
"""
H, W = size
grid = ops.ones(shape=(H, W), dtype=self.compute_dtype)
y_embed = ops.cumsum(grid, axis=0) - 0.5
x_embed = ops.cumsum(grid, axis=1) - 0.5
y_embed = y_embed / ops.cast(H, self.compute_dtype)
x_embed = x_embed / ops.cast(W, self.compute_dtype)
return self.__positional_encodings(
ops.stack([x_embed, y_embed], axis=-1)
)
def encode_coordinates(self, coords_input, image_size):
"""Positionally encode points that are not normalized to `[0, 1]`.
Args:
coords_input (tensor): 2D coordinates/points to map.
image_size (tuple[int, int]): Height and width of the image
being prompted.
Returns:
tensor: Positional encodings of the normalized coordinates.
"""
coords_normalized = ops.stack(
[
coords_input[..., 0] / image_size[1],
coords_input[..., 1] / image_size[0],
],
axis=-1,
)
return self.__positional_encodings(coords_normalized)
def get_config(self):
config = super().get_config()
config.update(
{
"num_positional_features": self.num_positional_features,
"scale": self.scale,
}
)
return config
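# Illustrative sketch (arbitrary sizes): the same layer can encode a dense
# grid for a whole image and a handful of (x, y) point prompts against a
# given image size. Calling the public `encode_image`/`encode_coordinates`
# methods directly keeps the example free of any prompt-encoder plumbing.
if __name__ == "__main__":
    embedding_layer = RandomFrequencyPositionalEmbeddings(
        num_positional_features=128, scale=1.0
    )
    dense_pe = embedding_layer.encode_image((64, 64))
    points = ops.convert_to_tensor([[[32.0, 16.0], [100.0, 200.0]]])
    sparse_pe = embedding_layer.encode_coordinates(
        points, image_size=(1024, 1024)
    )
    print(ops.shape(dense_pe))  # Expected: (64, 64, 256)
    print(ops.shape(sparse_pe))  # Expected: (1, 2, 256)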
| keras-cv/keras_cv/models/segmentation/segment_anything/sam_layers.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_layers.py",
"repo_id": "keras-cv",
"token_count": 5681
} | 80 |
<jupyter_start><jupyter_code>!pip install --upgrade tensorflow
!pip install "git+https://github.com/DavidLandup0/keras-cv@vit"
import math
import sys
import os
import pandas as pd
import tensorflow as tf
import keras_cv
import matplotlib.pyplot as plt
import numpy as np
config_names = {
# JAX name : KCV name, layer_nums
"Ti/16": ("ViTTiny16", 12),
"S/16": ("ViTS16", 12),
"B/16": ("ViTB16", 12),
"L/16": ("ViTL16", 24),
"S/32": ("ViTS32", 12),
"B/32": ("ViTB32", 12),
}
# Choose model to convert
model_to_convert = list(config_names.items())[0]
model_to_convert
model_cls = getattr(keras_cv.models, model_to_convert[1][0])
model = model_cls(
    include_rescaling=False,
    include_top=True,
    num_classes=1000,
    weights=None,
    input_shape=(224, 224, 3),
)
with tf.io.gfile.GFile("gs://vit_models/augreg/index.csv") as f:
df = pd.read_csv(f)
df.head()
model_df = df.query(
f'ds=="i21k" & adapt_resolution==224 & adapt_ds=="imagenet2012" & name=="{model_to_convert[0]}"'
).sort_values("adapt_final_test", ascending=False)
model_df.head()
best_model_i1k_checkpoint = str(model_df.iloc[0]["adapt_filename"])
model_df.iloc[0]["adapt_filename"], model_df.iloc[0]["adapt_final_test"]
filename = best_model_i1k_checkpoint
path = f"gs://vit_models/augreg/{filename}.npz"
print(f"{tf.io.gfile.stat(path).length / 1024 / 1024:.1f} MiB - {path}")
local_path = path.split("//")[-1].split("/")[-1]
local_path
!gsutil cp {path} .
with open(local_path, "rb") as f:
params_jax = np.load(f)
params_jax = dict(zip(params_jax.keys(), params_jax.values()))
from pprint import pformat
print(pformat(list(params_jax.keys())))
jax_params_to_kcv_params = {
"Transformer/posembed_input/pos_embedding": "patch_embedding/embedding/embeddings",
"embedding/bias": "patch_embedding_1/dense_26/bias",
"embedding/kernel": "patch_embedding_1/dense_26/kernel",
"cls": "patch_embedding_1/class_token",
"Transformer/encoderblock_0/LayerNorm_0/scale": "transformer_encoder_12/layer_normalization_25/gamma",
"Transformer/encoderblock_0/LayerNorm_0/bias": "transformer_encoder_12/layer_normalization_25/beta",
"Transformer/encoderblock_0/LayerNorm_2/scale": "transformer_encoder_12/layer_normalization_26/gamma",
"Transformer/encoderblock_0/LayerNorm_2/bias": "transformer_encoder_12/layer_normalization_26/beta",
"Transformer/encoderblock_0/MultiHeadDotProductAttention_1/query/kernel": "transformer_encoder_12/multi_head_attention_12/query/kernel",
"Transformer/encoderblock_0/MultiHeadDotProductAttention_1/query/bias": "transformer_encoder_12/multi_head_attention_12/query/bias",
"Transformer/encoderblock_0/MultiHeadDotProductAttention_1/key/kernel": "transformer_encoder_12/multi_head_attention_12/key/kernel",
"Transformer/encoderblock_0/MultiHeadDotProductAttention_1/key/bias": "transformer_encoder_12/multi_head_attention_12/key/bias",
"Transformer/encoderblock_0/MultiHeadDotProductAttention_1/value/kernel": "transformer_encoder_12/multi_head_attention_12/value/kernel",
"Transformer/encoderblock_0/MultiHeadDotProductAttention_1/value/bias": "transformer_encoder_12/multi_head_attention_12/value/bias",
"Transformer/encoderblock_0/MultiHeadDotProductAttention_1/out/kernel": "transformer_encoder_12/multi_head_attention_12/attention_output/kernel",
"Transformer/encoderblock_0/MultiHeadDotProductAttention_1/out/bias": "transformer_encoder_12/multi_head_attention_12/attention_output/bias",
"Transformer/encoderblock_0/MlpBlock_3/Dense_0/kernel": "transformer_encoder_12/dense_27/kernel",
"Transformer/encoderblock_0/MlpBlock_3/Dense_0/bias": "transformer_encoder_12/dense_27/bias",
"Transformer/encoderblock_0/MlpBlock_3/Dense_1/kernel": "transformer_encoder_12/dense_28/kernel",
"Transformer/encoderblock_0/MlpBlock_3/Dense_1/bias": "transformer_encoder_12/dense_28/bias",
# ... other transformer blocks
"Transformer/encoder_norm/scale": "layer_normalization_49/gamma",
"Transformer/encoder_norm/bias": "layer_normalization_49/beta",
}
model.layers
model.summary()
# Check shapes for the class token and embedding layers
print(params_jax["cls"].shape)
print(params_jax["embedding/kernel"].shape)
print(params_jax["embedding/bias"].shape)
print(params_jax["Transformer/posembed_input/pos_embedding"].shape)
for w in model.layers[1].weights:
print(w.name, w.shape)
# Copy PatchingAndEmbedding layer
model.layers[1].weights[0].assign(tf.Variable(params_jax["cls"]))
model.layers[1].weights[1].assign(tf.Variable(params_jax["embedding/kernel"]))
model.layers[1].weights[2].assign(tf.Variable(params_jax["embedding/bias"]))
model.layers[1].weights[3].assign(
tf.Variable(
params_jax["Transformer/posembed_input/pos_embedding"].squeeze()
)
)
# Check transformer block shapes between JAX and KCV
print(params_jax["Transformer/encoderblock_4/LayerNorm_0/scale"].shape)
print(params_jax["Transformer/encoderblock_4/LayerNorm_0/bias"].shape)
print(params_jax["Transformer/encoderblock_4/LayerNorm_2/scale"].shape)
print(params_jax[f"Transformer/encoderblock_4/LayerNorm_2/bias"].shape)
print(
params_jax[
f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/query/kernel"
].shape
)
print(
params_jax[
f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/query/bias"
].shape
)
print(
params_jax[
f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/key/kernel"
].shape
)
print(
params_jax[
f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/key/bias"
].shape
)
print(
params_jax[
f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/value/kernel"
].shape
)
print(
params_jax[
f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/value/bias"
].shape
)
print(
params_jax[
f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/out/kernel"
].shape
)
print(
params_jax[
f"Transformer/encoderblock_4/MultiHeadDotProductAttention_1/out/bias"
].shape
)
print(params_jax[f"Transformer/encoderblock_4/MlpBlock_3/Dense_0/kernel"].shape)
print(params_jax[f"Transformer/encoderblock_4/MlpBlock_3/Dense_0/bias"].shape)
print(params_jax[f"Transformer/encoderblock_4/MlpBlock_3/Dense_1/kernel"].shape)
print(params_jax[f"Transformer/encoderblock_4/MlpBlock_3/Dense_1/bias"].shape)
for w in model.layers[4].weights:
print(w.name, w.shape)
# Copy Transformer Encoders
for i in range(model_to_convert[1][1]):
model.layers[3 + i].weights[0].assign(
tf.Variable(
params_jax[f"Transformer/encoderblock_{i}/LayerNorm_0/scale"]
)
)
model.layers[3 + i].weights[1].assign(
tf.Variable(
params_jax[f"Transformer/encoderblock_{i}/LayerNorm_0/bias"]
)
)
model.layers[3 + i].weights[2].assign(
tf.Variable(
params_jax[f"Transformer/encoderblock_{i}/LayerNorm_2/scale"]
)
)
model.layers[3 + i].weights[3].assign(
tf.Variable(
params_jax[f"Transformer/encoderblock_{i}/LayerNorm_2/bias"]
)
)
model.layers[3 + i].weights[4].assign(
tf.Variable(
params_jax[
f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/query/kernel"
]
)
)
model.layers[3 + i].weights[5].assign(
tf.Variable(
params_jax[
f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/query/bias"
]
)
)
model.layers[3 + i].weights[6].assign(
tf.Variable(
params_jax[
f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/key/kernel"
]
)
)
model.layers[3 + i].weights[7].assign(
tf.Variable(
params_jax[
f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/key/bias"
]
)
)
model.layers[3 + i].weights[8].assign(
tf.Variable(
params_jax[
f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/value/kernel"
]
)
)
model.layers[3 + i].weights[9].assign(
tf.Variable(
params_jax[
f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/value/bias"
]
)
)
model.layers[3 + i].weights[10].assign(
tf.Variable(
params_jax[
f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/out/kernel"
]
)
)
model.layers[3 + i].weights[11].assign(
tf.Variable(
params_jax[
f"Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/out/bias"
].reshape(model.layers[3 + i].weights[11].shape)
)
)
model.layers[3 + i].weights[12].assign(
tf.Variable(
params_jax[
f"Transformer/encoderblock_{i}/MlpBlock_3/Dense_0/kernel"
]
)
)
model.layers[3 + i].weights[13].assign(
tf.Variable(
params_jax[f"Transformer/encoderblock_{i}/MlpBlock_3/Dense_0/bias"]
)
)
model.layers[3 + i].weights[14].assign(
tf.Variable(
params_jax[
f"Transformer/encoderblock_{i}/MlpBlock_3/Dense_1/kernel"
]
)
)
model.layers[3 + i].weights[15].assign(
tf.Variable(
params_jax[f"Transformer/encoderblock_{i}/MlpBlock_3/Dense_1/bias"]
)
)
print(params_jax["Transformer/encoder_norm/scale"].shape)
print(params_jax["Transformer/encoder_norm/bias"].shape)
for w in model.layers[15].weights:
print(w.name, w.shape)
# Copy layer norm before class head
model.layers[15].weights[0].assign(
tf.Variable(params_jax["Transformer/encoder_norm/scale"])
)
model.layers[15].weights[1].assign(
tf.Variable(params_jax["Transformer/encoder_norm/bias"])
)
print(params_jax["head/kernel"].shape)
print(params_jax["head/bias"].shape)
for w in model.layers[17].weights:
print(w.name, w.shape)
# Copy head kernel and bias
model.layers[17].weights[0].assign(tf.Variable(params_jax["head/kernel"]))
model.layers[17].weights[1].assign(tf.Variable(params_jax["head/bias"]))
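# Quick sanity check (illustrative, default `np.allclose` tolerances): the
# Keras head weights assigned above should now match the JAX parameters.
assert np.allclose(
    model.layers[17].weights[0].numpy(), params_jax["head/kernel"]
)
assert np.allclose(
    model.layers[17].weights[1].numpy(), params_jax["head/bias"]
)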
import matplotlib.pyplot as plt
import cv2
import numpy as np
import PIL
import urllib
def url_to_array(url):
req = urllib.request.urlopen(url)
    arr = np.array(bytearray(req.read()), dtype=np.uint8)
arr = cv2.imdecode(arr, -1)
arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
arr = cv2.resize(arr, (224, 224))
return arr
def preprocess_image(image, label):
image_resized = tf.image.resize(image, (224, 224))
image_resized = tf.cast(image_resized, tf.float32)
image_resized = (image_resized - 127.5) / 127.5
return image_resized, label
cat = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Cat_November_2010-1a.jpg/1200px-Cat_November_2010-1a.jpg"
cat_img = url_to_array(cat)
cat_img, _ = preprocess_image(cat_img, None)
cat_img = tf.expand_dims(cat_img, 0)
!wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt -O ilsvrc2012_wordnet_lemmas.txt
with open("ilsvrc2012_wordnet_lemmas.txt", "r") as f:
lines = f.readlines()
imagenet_int_to_str = [line.rstrip() for line in lines]
predictions = model.predict(cat_img)
top_5 = tf.math.top_k(predictions, k=5, sorted=False)
top_5
pred = np.argmax(predictions)
imagenet_int_to_str[int(pred)]
dog_url = "https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/dog-puppy-on-garden-royalty-free-image-1586966191.jpg?crop=1.00xw:0.669xh;0,0.190xh&resize=640:*"
dog_img = url_to_array(dog_url)
dog_img, _ = preprocess_image(dog_img, None)
dog_img = tf.expand_dims(dog_img, 0)
predictions = model.predict(dog_img)
pred = np.argmax(predictions)
imagenet_int_to_str[int(pred)]
model.compile(
"adam",
"sparse_categorical_crossentropy",
metrics=["accuracy", keras.metrics.SparseTopKCategoricalAccuracy(5)],
)
import tensorflow_datasets as tfds
(test_set), info = tfds.load(
"imagenet_v2", split=["test"], as_supervised=True, with_info=True
)
test_set = (
test_set[0]
.shuffle(len(test_set[0]))
.map(preprocess_image)
.batch(32)
.prefetch(tf.data.AUTOTUNE)
)
for entry, label in test_set.take(1):
print(label)
model.evaluate(test_set)
model.save(f"{model_to_convert[1][0]}.h5")<jupyter_output><empty_output> | keras-cv/keras_cv/tools/checkpoint_conversion/ViT_weight_conversion.ipynb/0 | {
"file_path": "keras-cv/keras_cv/tools/checkpoint_conversion/ViT_weight_conversion.ipynb",
"repo_id": "keras-cv",
"token_count": 5612
} | 81 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import inspect
import json
import os
from keras_cv.backend import keras
try:
import kagglehub
except ImportError:
kagglehub = None
KAGGLE_PREFIX = "kaggle://"
GS_PREFIX = "gs://"
def get_file(preset, path):
"""Download a preset file in necessary and return the local path."""
if not isinstance(preset, str):
raise ValueError(
f"A preset identifier must be a string. Received: preset={preset}"
)
if preset.startswith(KAGGLE_PREFIX):
if kagglehub is None:
raise ImportError(
"`from_preset()` requires the `kagglehub` package. "
"Please install with `pip install kagglehub`."
)
        # Strip the scheme prefix and validate the Kaggle handle format.
kaggle_handle = preset.removeprefix(KAGGLE_PREFIX)
num_segments = len(kaggle_handle.split("/"))
if num_segments not in (4, 5):
raise ValueError(
"Unexpected kaggle preset handle. Kaggle model handles "
"should have the form "
"kaggle://{org}/{model}/keras/{variant}[/{version}]. "
"For example, "
"'kaggle://keras/retinanet/keras/retinanet_base_en'. "
f"Received: preset={preset}"
)
return kagglehub.model_download(kaggle_handle, path)
elif preset.startswith(GS_PREFIX):
url = os.path.join(preset, path)
url = url.replace(GS_PREFIX, "https://storage.googleapis.com/")
subdir = preset.replace(GS_PREFIX, "gs_")
subdir = subdir.replace("/", "_").replace("-", "_")
filename = os.path.basename(path)
subdir = os.path.join(subdir, os.path.dirname(path))
return keras.utils.get_file(
filename,
url,
cache_subdir=os.path.join("models", subdir),
)
elif os.path.exists(preset):
# Assume a local filepath.
return os.path.join(preset, path)
else:
raise ValueError(
"Unknown preset identifier. A preset must be a one of:\n"
"1) a built in preset identifier like `'mobilenet_v3_small'`\n"
"2) a Kaggle Models handle like `'kaggle://keras/mobilenetv3/keras/mobilenet_v3_small'`\n" # noqa: E501
"3) a path to a local preset directory like `'./mobilenet_v3_small`\n" # noqa: E501
"Use `print(cls.presets.keys())` to view all built-in presets for "
"API symbol `cls`.\n"
f"Received: preset='{preset}'"
)
def recursive_pop(config, key):
"""Remove a key from a nested config object"""
config.pop(key, None)
for value in config.values():
if isinstance(value, dict):
recursive_pop(value, key)
if isinstance(value, list):
for v in value:
if isinstance(v, dict):
recursive_pop(v, key)
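def _recursive_pop_example():
    """Illustrative sketch: `recursive_pop` strips a key at every nesting
    level, which is how the compile/build configs are dropped in
    `save_to_preset` below. The config dict here is a made-up example."""
    config = {
        "build_config": {"input_shape": [None, 224, 224, 3]},
        "layers": [{"build_config": {"input_shape": [None, 64]}}],
    }
    recursive_pop(config, "build_config")
    return config  # Expected: {"layers": [{}]}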
def save_to_preset(
layer,
preset,
save_weights=True,
config_filename="config.json",
weights_filename="model.weights.h5",
):
"""Save a KerasCV layer to a preset directory."""
os.makedirs(preset, exist_ok=True)
# Optionally save weights.
save_weights = save_weights and hasattr(layer, "save_weights")
if save_weights:
weights_path = os.path.join(preset, weights_filename)
layer.save_weights(weights_path)
# Save a serialized Keras object.
config_path = os.path.join(preset, config_filename)
config = keras.saving.serialize_keras_object(layer)
# Include references to weights.
config["weights"] = weights_filename if save_weights else None
recursive_pop(config, "compile_config")
recursive_pop(config, "build_config")
with open(config_path, "w") as config_file:
config_file.write(json.dumps(config, indent=4))
from keras_cv import __version__ as keras_cv_version
keras_version = keras.version() if hasattr(keras, "version") else None
# Save any associated metadata.
if config_filename == "config.json":
metadata = {
"keras_version": keras_version,
"keras_cv_version": keras_cv_version,
"parameter_count": layer.count_params(),
"date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
}
metadata_path = os.path.join(preset, "metadata.json")
with open(metadata_path, "w") as metadata_file:
metadata_file.write(json.dumps(metadata, indent=4))
def load_from_preset(
preset,
load_weights=None,
input_shape=None,
config_file="config.json",
config_overrides={},
):
"""Load a KerasCV layer to a preset directory."""
# Load a serialized Keras object.
config_path = get_file(preset, config_file)
with open(config_path) as config_file:
config = json.load(config_file)
config["config"] = {**config["config"], **config_overrides}
layer = keras.saving.deserialize_keras_object(config)
if input_shape is not None:
layer.build(input_shape)
# Check load_weights flag does not violate preset config.
if load_weights is True and config["weights"] is None:
raise ValueError(
f"The specified preset `{preset}` does not include weights. "
"Please remove the `load_weights` flag when calling "
"`from_preset()` on this preset."
)
# Default to loading weights if available.
if load_weights is not False and config["weights"] is not None:
weights_path = get_file(preset, config["weights"])
if hasattr(layer, "_layer_checkpoint_dependencies"):
legacy_load_weights(layer, weights_path)
else:
layer.load_weights(weights_path)
return layer
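# Minimal round-trip sketch (illustrative; the layer and paths are hypothetical):
#   save_to_preset(my_backbone, "./my_preset")
#   restored = load_from_preset("./my_preset", load_weights=True)
# `save_to_preset` writes config.json, model.weights.h5 and metadata.json into
# the preset directory; `load_from_preset` deserializes the layer and reloads
# the saved weights.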
def check_preset_class(
preset,
classes,
config_file="config.json",
):
"""Validate a preset is being loaded on the correct class."""
config_path = get_file(preset, config_file)
try:
with open(config_path) as config_file:
config = json.load(config_file)
    except Exception:
raise ValueError(
f"The specified preset `{preset}` is unknown. "
"Please check documentation to ensure the correct preset "
"handle is being used."
)
cls = keras.saving.get_registered_object(config["registered_name"])
if not isinstance(classes, (tuple, list)):
classes = (classes,)
# Subclass checking and alias checking
if not any(issubclass(cls, obj) for obj in classes) and not any(
issubclass(alias, cls) for alias in classes
):
raise ValueError(
f"Unexpected class in preset `'{preset}'`. "
"When calling `from_preset()` on a class object, the preset class "
f"much match allowed classes. Allowed classes are `{classes}`. "
f"Received: `{cls}`."
)
return cls
def legacy_load_weights(layer, weights_path):
# Hacky fix for TensorFlow 2.13 and 2.14 when loading a `.weights.h5` file.
# We find the `Functional` class, and temporarily remove the
    # `_layer_checkpoint_dependencies` property, which on older versions of
    # TensorFlow completely broke the variable paths for functional models.
functional_cls = None
for cls in inspect.getmro(layer.__class__):
if cls.__name__ == "Functional":
functional_cls = cls
property = functional_cls._layer_checkpoint_dependencies
functional_cls._layer_checkpoint_dependencies = {}
layer.load_weights(weights_path)
functional_cls._layer_checkpoint_dependencies = property
| keras-cv/keras_cv/utils/preset_utils.py/0 | {
"file_path": "keras-cv/keras_cv/utils/preset_utils.py",
"repo_id": "keras-cv",
"token_count": 3378
} | 82 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv import utils
from keras_cv.api_export import keras_cv_export
from keras_cv.utils import assert_matplotlib_installed
from keras_cv.visualization.plot_image_gallery import plot_image_gallery
def reshape_masks(segmentation_masks):
rank = len(segmentation_masks.shape)
if rank == 3:
# (B, H, W)
return segmentation_masks[..., np.newaxis]
elif rank == 4:
# (B, H, W, num_channels) OR (B, H, W, 1)
if segmentation_masks.shape[-1] == 1:
# Repeat the masks 3 times in order to build 3 channel
# segmentation masks.
return segmentation_masks.repeat(repeats=3, axis=-1)
else:
return np.argmax(segmentation_masks, axis=-1).repeat(
repeats=3, axis=-1
)
def transform_segmentation_masks(segmentation_masks, num_classes, value_range):
segmentation_masks = utils.to_numpy(segmentation_masks)
segmentation_masks = reshape_masks(segmentation_masks=segmentation_masks)
# Interpolate the segmentation masks from the range of (0, num_classes)
# to the value range provided.
segmentation_masks = utils.transform_value_range(
segmentation_masks,
original_range=(0, num_classes),
target_range=value_range,
)
return segmentation_masks
@keras_cv_export("keras_cv.visualization.plot_segmentation_mask_gallery")
def plot_segmentation_mask_gallery(
images,
value_range,
num_classes,
y_true=None,
y_pred=None,
rows=3,
cols=3,
**kwargs
):
"""Plots a gallery of images with corresponding segmentation masks.
Args:
images: a Tensor or NumPy array containing images to show in the
gallery. The images should be batched and of shape (B, H, W, C).
value_range: value range of the images. Common examples include
`(0, 255)` and `(0, 1)`.
num_classes: number of segmentation classes.
y_true: (Optional) a Tensor or NumPy array representing the ground truth
segmentation masks. The ground truth segmentation maps should be
batched.
y_pred: (Optional) a Tensor or NumPy array representing the predicted
segmentation masks. The predicted segmentation masks should be
batched.
kwargs: keyword arguments to propagate to
`keras_cv.visualization.plot_image_gallery()`.
Usage:
```python
train_ds = tfds.load(
"oxford_iiit_pet", split="train", with_info=False, shuffle_files=True
)
def unpackage_tfds_inputs(inputs):
image = inputs["image"]
segmentation_mask = inputs["segmentation_mask"]
return image, segmentation_mask
train_ds = train_ds.map(unpackage_tfds_inputs).ragged_batch(16)
images, segmentation_masks = next(iter(train_ds.take(1)))
keras_cv.visualization.plot_segmentation_mask_gallery(
images,
value_range=(0, 255),
num_classes=3, # The number of classes for the oxford iiit pet dataset
y_true=segmentation_masks,
y_pred=None,
scale=3,
rows=2,
cols=2,
)
```

"""
assert_matplotlib_installed("plot_segmentation_mask_gallery")
plotted_images = utils.to_numpy(images)
# Initialize a list to collect the segmentation masks that will be
# concatenated to the images for visualization.
    masks_to_concatenate = [plotted_images]
if y_true is not None:
plotted_y_true = transform_segmentation_masks(
segmentation_masks=y_true,
num_classes=num_classes,
value_range=value_range,
)
        masks_to_concatenate.append(plotted_y_true)
if y_pred is not None:
plotted_y_pred = transform_segmentation_masks(
segmentation_masks=y_pred,
num_classes=num_classes,
value_range=value_range,
)
        masks_to_concatenate.append(plotted_y_pred)
# Concatenate the images and the masks together.
    plotted_images = np.concatenate(masks_to_concatenate, axis=2)
plot_image_gallery(
plotted_images, value_range, rows=rows, cols=cols, **kwargs
)
| keras-cv/keras_cv/visualization/plot_segmentation_mask_gallery.py/0 | {
"file_path": "keras-cv/keras_cv/visualization/plot_segmentation_mask_gallery.py",
"repo_id": "keras-cv",
"token_count": 1954
} | 83 |
import sys
import keras
from absl import flags
import keras_cv
flags.DEFINE_string("weights_path", None, "Path of weights to load")
flags.DEFINE_string(
"output_weights_path", None, "Path of notop weights to store"
)
flags.DEFINE_string("model_name", None, "Name of the KerasCV.model")
FLAGS = flags.FLAGS
FLAGS(sys.argv)
if not FLAGS.weights_path.endswith(".h5"):
raise ValueError("Weights path must end in .h5")
model = eval(
f"keras_cv.models.{FLAGS.model_name}(include_rescaling=True, "
f"include_top=True, num_classes=1000, weights=FLAGS.weights_path)"
)
without_top = keras.models.Model(model.input, model.layers[-3].output)
without_top.save_weights(FLAGS.output_weights_path)
# Because the usage of keras_cv is in an eval() call, the linter is angry.
# We include this to avoid an unused import warning
keras_cv.models
| keras-cv/shell/weights/remove_top.py/0 | {
"file_path": "keras-cv/shell/weights/remove_top.py",
"repo_id": "keras-cv",
"token_count": 309
} | 84 |
# Applications
Kerasの応用は事前学習した重みを利用可能な深層学習のモデルです.
これらのモデルは予測,特徴量抽出そしてfine-tuningのために利用できます.
モデルをインスタンス化すると重みは自動的にダウンロードされます.重みは`~/.keras/models/`に格納されます.
## 利用可能なモデル
### ImageNetで学習した重みをもつ画像分類のモデル:
- [Xception](#xception)
- [VGG16](#vgg16)
- [VGG19](#vgg19)
- [ResNet, ResNetV2](#resnet)
- [InceptionV3](#inceptionv3)
- [InceptionResNetV2](#inceptionresnetv2)
- [MobileNet](#mobilenet)
- [MobileNetV2](#mobilenetv2)
- [DenseNet](#densenet)
- [NASNet](#nasnet)
これら全てのアーキテクチャは全てのバックエンド(TensorFlowやTheano,CNTK)と互換性があり,モデルはインスタンス化する時はKerasの設定ファイル`~/.keras/keras.json`に従って画像のデータフォーマットが設定されます.
例えば,`image_dim_ordering=channels_last`とした際は,このリポジトリからロードされるモデルは,TensorFlowの次元の順序"Height-Width-Depth"にしたがって構築されます.
注意:
- `Keras < 2.2.0`ではXceptionモデルはTensorFlowでのみ利用可能です.これは`SeparableConvolution`レイヤーに依存しているからです.
- `Keras < 2.1.5`ではMobileNetモデルはTensorFlowでのみ利用可能です.これは`DepthwiseConvolution`レイヤーに依存しているからです.
-----
## 画像分類モデルの使用例
### Classify ImageNet classes with ResNet50
```python
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
model = ResNet50(weights='imagenet')
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=3)[0])
# Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
```
### Extract features with VGG16
```python
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import numpy as np
model = VGG16(weights='imagenet', include_top=False)
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = model.predict(x)
```
### Extract features from an arbitrary intermediate layer with VGG19
```python
from keras.applications.vgg19 import VGG19
from keras.preprocessing import image
from keras.applications.vgg19 import preprocess_input
from keras.models import Model
import numpy as np
base_model = VGG19(weights='imagenet')
model = Model(inputs=base_model.input, outputs=base_model.get_layer('block4_pool').output)
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
block4_pool_features = model.predict(x)
```
### Fine-tune InceptionV3 on a new set of classes
```python
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
# create the base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer -- let's say we have 200 classes
predictions = Dense(200, activation='softmax')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# train the model on the new data for a few epochs
model.fit_generator(...)
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.
# let's visualize layer names and layer indices to see how many layers
# we should freeze:
for i, layer in enumerate(base_model.layers):
print(i, layer.name)
# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 249 layers and unfreeze the rest:
for layer in model.layers[:249]:
layer.trainable = False
for layer in model.layers[249:]:
layer.trainable = True
# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
from keras.optimizers import SGD
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy')
# we train our model again (this time fine-tuning the top 2 inception blocks
# alongside the top Dense layers
model.fit_generator(...)
```
### Build InceptionV3 over a custom input tensor
```python
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Input
# this could also be the output a different Keras model or layer
input_tensor = Input(shape=(224, 224, 3)) # this assumes K.image_data_format() == 'channels_last'
model = InceptionV3(input_tensor=input_tensor, weights='imagenet', include_top=True)
```
-----
# Documentation for individual models
| Model | Size | Top-1 Accuracy | Top-5 Accuracy | Parameters | Depth |
| ----- | ----: | --------------: | --------------: | ----------: | -----: |
| [Xception](#xception) | 88 MB | 0.790 | 0.945| 22,910,480 | 126 |
| [VGG16](#vgg16) | 528 MB| 0.715 | 0.901 | 138,357,544 | 23
| [VGG19](#vgg19) | 549 MB | 0.727 | 0.910 | 143,667,240 | 26
| [ResNet50](#resnet) | 98 MB | 0.749 | 0.921 | 25,636,712 | - |
| [ResNet101](#resnet) | 171 MB | 0.764 | 0.928 | 44,707,176 | - |
| [ResNet152](#resnet) | 232 MB | 0.766 | 0.931 | 60,419,944 | - |
| [ResNet50V2](#resnet) | 98 MB | 0.760 | 0.930 | 25,613,800 | - |
| [ResNet101V2](#resnet) | 171 MB | 0.772 | 0.938 | 44,675,560 | - |
| [ResNet152V2](#resnet) | 232 MB | 0.780 | 0.942 | 60,380,648 | - |
| [InceptionV3](#inceptionv3) | 92 MB | 0.788 | 0.944 | 23,851,784 | 159 |
| [InceptionResNetV2](#inceptionresnetv2) | 215 MB | 0.804 | 0.953 | 55,873,736 | 572 |
| [MobileNet](#mobilenet) | 17 MB | 0.665 | 0.871 | 4,253,864 | 88
| [DenseNet121](#densenet) | 33 MB | 0.745 | 0.918 | 8,062,504 | 121
| [DenseNet169](#densenet) | 57 MB | 0.759 | 0.928 | 14,307,880 | 169
| [DenseNet201](#densenet) | 80 MB | 0.770 | 0.933 | 20,242,984 | 201
トップ1とトップ5の精度はImageNetの検証データセットを参照しています.
-----
## Xception
```python
keras.applications.xception.Xception(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNetで事前学習した重みを利用可能なXception V1モデル.
ImageNetにおいて,このモデルのtop-1のvalidation accuracyは0.790で,top-5のvalidation accuracyは0.945です.
データフォーマットは`'channels_last'`(height, width, channels)のみサポートしています.
デフォルトの入力サイズは299x299.
### 引数
- include_top: ネットワークの出力層側にある全結合層を含むかどうか.
- weights: `None` (ランダム初期化) か `'imagenet'` (ImageNetで学習した重み) のどちらか一方.
- input_tensor: モデルの入力画像として利用するためのオプションのKerasテンソル (つまり,`layers.Input()`の出力)
- input_shape: オプショナルなshapeのタプル,`include_top`がFalseの場合のみ指定可能 (そうでないときは入力のshapeは`(299, 299, 3)`).正確に3つの入力チャンネルをもつ必要があり,width と height は71以上にする必要があります.例えば`(150, 150, 3)`は有効な値です.
- pooling: 特徴量抽出のためのオプショナルなpooling mode,`include_top`が`False`の場合のみ指定可能.
- `None`:モデルの出力が,最後のconvolutional layerの4階テンソルであることを意味しています.
- `'avg'`:最後のconvolutional layerの出力にglobal average poolingが適用されることで,モデルの出力が2階テンソルになることを意味しています.
- `'max'`:global max poolingが適用されることを意味します.
- classes: 画像のクラス分類のためのオプショナルなクラス数,`include_top`が`True`かつ`weights`が指定されていない場合のみ指定可能.
### 戻り値
Kerasの`Model`インスタンス.
### 参考文献
- [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)
### ライセンス
この重みは私達自身が学習したもので,MITライセンスの下で公開されています.
-----
## VGG16
```python
keras.applications.vgg16.VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNetで事前学習した重みを利用可能なVGG16モデル.
`'channels_first'`データフォーマット (channels, height, width) か`'channels_last'`データフォーマット (height, width, channels)の両方で構築可能です.
デフォルトの入力サイズは224x224.
### 引数
- include_top: ネットワークの出力層側にある3つの全結合層を含むかどうか.
- weights: `None` (ランダム初期化) か `'imagenet'` (ImageNetで学習した重み) のどちらか一方.
- input_tensor: モデルの入力画像として利用するためのオプションのKerasテンソル (つまり,`layers.Input()`の出力)
- input_shape: オプショナルなshapeのタプル,`include_top`が`False`の場合のみ指定可能 (そうでないときは入力のshapeは`(224, 224, 3)` (`'channels_last'`データフォーマットのとき) か `(3, 224, 224)` (`'channels_first'`データフォーマットのとき) ).正確に3つの入力チャンネルをもつ必要があり,width と height は48以上にする必要があります.例えば`(200, 200, 3)`は有効値.
- pooling: 特徴量抽出のためのオプショナルなpooling mode,`include_top`が`False`の場合のみ指定可能.
- `None`:モデルの出力が,最後のconvolutional layerの4階テンソルであることを意味しています.
- `'avg'`:最後のconvolutional layerの出力にglobal average poolingが適用されることで,モデルの出力が2階テンソルになることを意味しています.
- `'max'`:global max poolingが適用されることを意味します.
- classes: 画像のクラス分類のためのオプショナルなクラス数,`include_top`が`True`かつ`weights`が指定されていない場合のみ指定可能.
### 戻り値
Kerasの`Model`インスタンス.
### 参考文献
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556): please cite this paper if you use the VGG models in your work.
### ライセンス
この重みは[Oxford大学のVGG](http://www.robots.ox.ac.uk/~vgg/research/very_deep/)により[Creative Commons Attribution License](https://creativecommons.org/licenses/by/4.0/)の下で公開されたものを移植しています.
-----
## VGG19
```python
keras.applications.vgg19.VGG19(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNetで事前学習した重みを利用可能なVGG19モデル.
`'channels_first'`データフォーマット (channels, height, width) か`'channels_last'`データフォーマット (height, width, channels)の両方で構築可能です.
デフォルトの入力サイズは224x224.
### 引数
- include_top: ネットワークの出力層側にある3つの全結合層を含むかどうか.
- weights: `None` (ランダム初期化) か `'imagenet'` (ImageNetで学習した重み) の一方.
- input_tensor: モデルの入力画像として利用するためのオプションのKerasテンソル (つまり,`layers.Input()`の出力)
- input_shape: オプショナルなshapeのタプル,`include_top`がFalseの場合のみ指定可能 (そうでないときは入力のshapeは`(224, 224, 3)` (`'channels_last'`データフォーマットのとき) か `(3, 224, 224)` (`'channels_first'`データフォーマットのとき) ).正確に3つの入力チャンネルをもつ必要があり,width と height は48以上にする必要があります.例えば`(200, 200, 3)`は有効値.
- pooling: 特徴量抽出のためのオプショナルなpooling mode,`include_top`が`False`の場合のみ指定可能.
- `None`:モデルの出力が,最後のconvolutional layerの4階テンソルであることを意味しています.
- `'avg'`:最後のconvolutional layerの出力にglobal average poolingが適用されることで,モデルの出力が2階テンソルになることを意味しています.
- `'max'`:global max poolingが適用されることを意味します.
- classes: 画像のクラス分類のためのオプショナルなクラス数,`include_top`が`True`かつ`weights`が指定されていない場合のみ指定可能.
### 戻り値
Kerasの`Model`インスタンス.
### 参考文献
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
### ライセンス
この重みは[Oxford大学のVGG](http://www.robots.ox.ac.uk/~vgg/research/very_deep/)により[Creative Commons Attribution License](https://creativecommons.org/licenses/by/4.0/)の下で公開されたものを移植しています.
-----
## ResNet
```python
keras.applications.resnet.ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet.ResNet101(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet.ResNet152(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet_v2.ResNet50V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet_v2.ResNet101V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet_v2.ResNet152V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNetで事前学習した重みを利用可能なResNet, ResNetV2モデル.
`'channels_first'`データフォーマット (channels, height, width) か`'channels_last'`データフォーマット (height, width, channels)の両方で構築可能です.
デフォルトの入力サイズは224x224.
### 引数
- include_top: ネットワークの出力層側にある全結合層を含むかどうか.
- weights: `None` (ランダム初期化) か `'imagenet'` (ImageNetで学習した重み) の一方.
- input_tensor: モデルの入力画像として利用するためのオプションのKerasテンソル (つまり,`layers.Input()`の出力)
- input_shape: オプショナルなshapeのタプル,`include_top`がFalseの場合のみ指定可能 (そうでないときは入力のshapeは`(224, 224, 3)` (`'channels_last'`データフォーマットのとき) か `(3, 224, 224)` (`'channels_first'`データフォーマットのとき) ).正確に3つの入力チャンネルをもつ必要があり,width と height は197以上にする必要があります.例えば`(200, 200, 3)`は有効値.
- pooling: 特徴量抽出のためのオプショナルなpooling mode,`include_top`が`False`の場合のみ指定可能.
- `None`:モデルの出力が,最後のconvolutional layerの4階テンソルであることを意味しています.
- `'avg'`:最後のconvolutional layerの出力にglobal average poolingが適用されることで,モデルの出力が2階テンソルになることを意味しています.
- `'max'`:global max poolingが適用されることを意味します.
- classes: 画像のクラス分類のためのオプショナルなクラス数,`include_top`が`True`かつ`weights`が指定されていない場合のみ指定可能.
### 戻り値
Kerasの`Model`インスタンス.
### 参考文献
- `ResNet`: [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
- `ResNetV2`: [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027)
### ライセンス
これらの重みは以下のライセンスの元で公開されたものを移植しています:
- `ResNet`: [The original repository of Kaiming He](https://github.com/KaimingHe/deep-residual-networks) under the [MIT license](https://github.com/KaimingHe/deep-residual-networks/blob/master/LICENSE).
- `ResNetV2`: [Facebook](https://github.com/facebook/fb.resnet.torch) under the [BSD license](https://github.com/facebook/fb.resnet.torch/blob/master/LICENSE).
-----
## InceptionV3
```python
keras.applications.inception_v3.InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNetで事前学習した重みを利用可能なInception V3モデル.
`'channels_first'`データフォーマット (channels, height, width) か`'channels_last'`データフォーマット (height, width, channels)の両方で構築可能です.
デフォルトの入力サイズは299x299.
### 引数
- include_top: ネットワークの出力層側にある全結合層を含むかどうか.
- weights: `None` (ランダム初期化) か `'imagenet'` (ImageNetで学習した重み) の一方.
- input_tensor: モデルの入力画像として利用するためのオプションのKerasテンソル (つまり,`layers.Input()`の出力)
- input_shape: オプショナルなshapeのタプル,`include_top`がFalseの場合のみ指定可能 (そうでないときは入力のshapeは`(299, 299, 3)` (`'channels_last'`データフォーマットのとき) か `(3, 299, 299)` (`'channels_first'`データフォーマットのとき) ).正確に3つの入力チャンネルをもつ必要があり,width と height は139以上にする必要があります.例えば`(150, 150, 3)`は有効値.
- pooling: 特徴量抽出のためのオプショナルなpooling mode,`include_top`が`False`の場合のみ指定可能.
- `None`:モデルの出力が,最後のconvolutional layerの4階テンソルであることを意味しています.
- `'avg'`:最後のconvolutional layerの出力にglobal average poolingが適用されることで,モデルの出力が2階テンソルになることを意味しています.
- `'max'`:global max poolingが適用されることを意味します.
- classes: 画像のクラス分類のためのオプショナルなクラス数,`include_top`が`True`かつ`weights`が指定されていない場合のみ指定可能.
### 戻り値
Kerasの`Model`インスタンス.
### 参考文献
- [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567)
### ライセンス
この重みは [Apacheライセンス](https://github.com/tensorflow/models/blob/master/LICENSE)の下で公開されています.
-----
## InceptionResNetV2
```python
keras.applications.inception_resnet_v2.InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNetで事前学習したInception-ResNet V2モデル.
`'channels_first'`データフォーマット (channels, height, width) か`'channels_last'`データフォーマット (height, width, channels)の両方で構築可能です.
デフォルトの入力サイズは299x299.
### 引数
- include_top: ネットワークの出力層側にある全結合層を含むかどうか.
- weights: `None` (ランダム初期化) か `'imagenet'` (ImageNetで学習した重み) の一方.
- input_tensor: モデルの入力画像として利用するためのオプションのKerasテンソル (つまり,`layers.Input()`の出力)
- input_shape: オプショナルなshapeのタプル,`include_top`がFalseの場合のみ指定可能 (そうでないときは入力のshapeは`(299, 299, 3)` (`'channels_last'`データフォーマットのとき) か `(3, 299, 299)` (`'channels_first'`データフォーマットのとき) ).正確に3つの入力チャンネルをもつ必要があり,width と height は139以上にする必要があります.例えば`(150, 150, 3)`は有効値.
- pooling: 特徴量抽出のためのオプショナルなpooling mode,`include_top`が`False`の場合のみ指定可能.
- `None`:モデルの出力が,最後のconvolutional layerの4階テンソルであることを意味しています.
- `'avg'`:最後のconvolutional layerの出力にglobal average poolingが適用されることで,モデルの出力が2階テンソルになることを意味しています.
- `'max'`:global max poolingが適用されることを意味します.
- classes: 画像のクラス分類のためのオプショナルなクラス数,`include_top`が`True`かつ`weights`が指定されていない場合のみ指定可能.
### 戻り値
Kerasのモデルインスタンス.
### 参考文献
- [Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
### ライセンス
この重みは [Apacheライセンス](https://github.com/tensorflow/models/blob/master/LICENSE)の下で公開されています.
-----
## MobileNet
```python
keras.applications.mobilenet.MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=1e-3, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
```
ImageNetで事前学習したMobileNetモデル.
データフォーマットが`'channels_last'` (height, width, channels)の時のみサポートされることに注意してください.
`load_model`からMobileNetモデルをロードするには,カスタムオブジェクトの`relu6`をインポートし,`custom_objects`パラメータに渡してください.
例
```python
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6})
```
デフォルトの入力サイズは224x224.
### 引数
- input_shape: オプショナルなshapeのタプル,`include_top`が`False`の場合のみ指定可能 (そうでないときは入力のshapeは`(224, 224, 3)` (`'channels_last'`データフォーマットのとき) か `(3, 224, 224)` (`'channels_first'`データフォーマットのとき) ).正確に3つの入力チャンネルをもつ必要があり,width と height は32以上にする必要があります.例えば`(200, 200, 3)`は有効値.
- alpha: ネットワークの幅の制御.
- `alpha` < 1.0の場合,各レイヤーのフィルタ数を比例して減少させます.
- `alpha` > 1.0の場合,各レイヤーのフィルタ層を比例して増加させます.
- `alpha` = 1の場合,論文のデフォルトのフィルタ数が各レイヤーで使われます.
- depth_multiplier: 深さ方向の畳み込みための深さ乗数(resolution multiplierとも呼ばれます)
- dropout: ドロップアウト率
- include_top: ネットワークの出力層側にある全結合層を含むかどうか.
- weights: `None` (ランダム初期化) か `'imagenet'` (ImageNetで学習した重み) の一方.
- input_tensor: モデルの入力画像として利用するためのオプションのKerasテンソル (つまり,`layers.Input()`の出力)
- pooling: 特徴量抽出のためのオプショナルなpooling mode,`include_top`が`False`の場合のみ指定可能.
- `None`:モデルの出力が,最後のconvolutional layerの4階テンソルであることを意味しています.
- `'avg'`:最後のconvolutional layerの出力にglobal average poolingが適用されることで,モデルの出力が2階テンソルになることを意味しています.
- `'max'`:global max poolingが適用されることを意味します.
- classes: 画像のクラス分類のためのオプショナルなクラス数,`include_top`が`True`かつ`weights`が指定されていない場合のみ指定可能.
### 戻り値
Kerasの`Model`インスタンス.
### 参考文献
- [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/pdf/1704.04861.pdf)
### ライセンス
この重みは [Apacheライセンス](https://github.com/tensorflow/models/blob/master/LICENSE)の下で公開されています.
-----
## DenseNet
```python
keras.applications.densenet.DenseNet121(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.densenet.DenseNet169(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.densenet.DenseNet201(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNetで事前学習したDenseNetモデル.
このモデルは`'channels_first'`データフォーマット(channels, height, width)と`'channels_last'`データフォーマット(height, width, channels)の両方で構築可能です.
デフォルトの入力サイズは224x224.
### 引数
- blocks: 4つのdenseレイヤーために構築するブロックの個数.
- include_top: ネットワークの出力層側にある全結合層を含むかどうか.
- weights: `None`(ランダム初期化),'imagenet'(ImageNetでの事前学習),ロードする重みのファイルへのパスのいずれか.
- input_tensor: モデルの入力画像として利用するためのオプションのKerasテンソル(つまり,`layers.Input()`の出力)
- input_shape: オプショナルなshapeのタプル,`include_top`が`False`の場合のみ指定可能 (そうでないときは入力のshapeは`(224, 224, 3)` (`'channels_last'`データフォーマットのとき) か `(3, 224, 224)` (`'channels_first'`データフォーマットのとき) ).正確に3つの入力チャンネルをもつ必要があります.
- pooling: 特徴量抽出のためのオプショナルなpooling mode,`include_top`が`False`の場合のみ指定可能.
- `None`:モデルの出力が,最後のconvolutional layerの4階テンソルであることを意味しています.
- `'avg'`:最後のconvolutional layerの出力にglobal average poolingが適用されることで,モデルの出力が2階テンソルになることを意味しています.
- `'max'`:global max poolingが適用されることを意味します.
- classes: 画像のクラス分類のためのオプショナルなクラス数,`include_top`が`True`かつ`weights`が指定されていない場合のみ指定可能.
### 戻り値
Kerasのモデルインスタンス.
### 参考文献
- [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993) (CVPR 2017 Best Paper Award)
### ライセンス
この重みは[三条項BSDライセンス](https://github.com/liuzhuang13/DenseNet/blob/master/LICENSE)の下で公開されています.
-----
## NASNet
```python
keras.applications.nasnet.NASNetLarge(input_shape=None, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
keras.applications.nasnet.NASNetMobile(input_shape=None, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
```
ImageNetで事前学習したNeural Architecture Search Network (NASNet)モデル.
デフォルトの入力サイズは,NASNetLargeモデルは331x331,NASNetMobileモデルは224x224.
### 引数
- input_shape: オプショナルなshapeのタプル,`include_top`が`False`の場合のみ指定可能(そうでないときの入力のshapeは,NASNetMobileなら`(224, 224, 3)`(`'channels_last'`データフォーマットのとき)または`(3, 224, 224)`(`'channels_first'`データフォーマットのとき),NASNetLargeなら`(331, 331, 3)`(`'channels_last'`データフォーマットのとき)または`(3, 331, 331)`(`'channels_first'`データフォーマットのとき)).正確に3つの入力チャンネルをもつ必要があり,width と height は32以上にする必要があります.例えば`(200, 200, 3)`は有効値.
- include_top: ネットワークの出力層側にある全結合層を含むかどうか.
- weights: `None`(ランダム初期化)か`'imagenet'`(ImageNetで学習した重み)の一方.
- input_tensor: モデルの入力画像として利用するためのオプションのKerasテンソル(つまり,`layers.Input()`の出力)
- pooling: 特徴量抽出のためのオプショナルなpooling mode,`include_top`が`False`の場合のみ指定可能.
- `None`:モデルの出力が,最後のconvolutional layerの4階テンソルであることを意味しています.
- `'avg'`:最後のconvolutional layerの出力にglobal average poolingが適用されることで,モデルの出力が2階テンソルになることを意味しています.
- `'max'`:global max poolingが適用されることを意味します.
- classes: 画像のクラス分類のためのオプショナルなクラス数,`include_top`が`True`かつ`weights`が指定されていない場合のみ指定可能.
### 戻り値
Kerasの`Model`インスタンス.
### 参考文献
- [Learning Transferable Architectures for Scalable Image Recognition](https://arxiv.org/abs/1707.07012)
### ライセンス
この重みは [Apacheライセンス](https://github.com/tensorflow/models/blob/master/LICENSE)の下で公開されています.
-----
## MobileNetV2
```python
keras.applications.mobilenet_v2.MobileNetV2(input_shape=None, alpha=1.0, depth_multiplier=1, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
```
ImageNetで事前学習したMobileNetV2モデル.
データフォーマットが`'channels_last'` (height, width, channels)の時のみサポートされることに注意してください.
`load_model`からMobileNetV2モデルをロードするには,カスタムオブジェクトの`relu6`をインポートし,`custom_objects`パラメータに渡してください.
例
```python
model = load_model('mobilenet_v2.h5', custom_objects={
'relu6': mobilenetv2.relu6})
```
デフォルトの入力サイズは224x224.
### 引数
- input_shape: オプショナルなshapeのタプル,入力画像の解像度が(224, 224, 3)でないときは指定すべきです. (224, 224, 3)のように正確に3つの入力チャネルが必要です.input_tensorからinput_shapeが推論できるならこのオプションは省くこともできます.入力するinput_tensorとinput_shapeを決めてそれらの値がマッチしていればinput_shapeが用いられ,shapeがマッチしなければエラーを送出します.例えば`(160, 160, 3)`は妥当な値です.
- alpha: ネットワークの幅の制御.MobileNetV2の論文ではwidth multiplierとして知られています.
- `alpha` < 1.0の場合,各レイヤーのフィルタ数を比例して減少させます.
- `alpha` > 1.0の場合,各レイヤーのフィルタ層を比例して増加させます.
- `alpha` = 1の場合,論文のデフォルトのフィルタ数が各レイヤーで使われます.
- depth_multiplier: 深さ方向の畳み込みための深さ乗数(resolution multiplierとも呼ばれます)
- include_top: ネットワークの出力層側にある全結合層を含むかどうか.
- weights: `None`(ランダム初期化)か,`'imagenet'`(ImageNetで学習した重み)か,ロードする重みファイルへのパスのいずれか.
- input_tensor: モデルの入力画像として利用するためのオプションのKerasテンソル (つまり,`layers.Input()`の出力)
- pooling: 特徴量抽出のためのオプショナルなpooling mode,`include_top`が`False`の場合のみ指定可能.
- `None`:モデルの出力が,最後のconvolutional layerの4階テンソルであることを意味しています.
- `'avg'`:最後のconvolutional layerの出力にglobal average poolingが適用されることで,モデルの出力が2階テンソルになることを意味しています.
- `'max'`:global max poolingが適用されることを意味します.
- classes: 画像のクラス分類のためのオプショナルなクラス数,`include_top`がTrueかつ`weights`が指定されていない場合のみ指定可能.
### 戻り値
Kerasのモデルインスタンス.
### 参考文献
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381)
### ライセンス
この重みは [Apacheライセンス](https://github.com/tensorflow/models/blob/master/LICENSE)の下で公開されています.
| keras-docs-ja/sources/applications.md/0 | {
"file_path": "keras-docs-ja/sources/applications.md",
"repo_id": "keras-docs-ja",
"token_count": 14984
} | 85 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/local.py#L15)</span>
### LocallyConnected1D
```python
keras.layers.local.LocallyConnected1D(filters, kernel_size, strides=1, padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
1次元入力に対応したLocally-connectedレイヤーです.
`LocallyConnected1D`は`Conv1D`と似た動作をします.しかし,重みが共有されない,つまり入力のパッチごとに異なるフィルタが適用される点が違います.
__例__
```python
# apply a unshared weight convolution 1d of length 3 to a sequence with
# 10 timesteps, with 64 output filters
model = Sequential()
model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
# now model.output_shape == (None, 8, 64)
# add a new conv1d on top
model.add(LocallyConnected1D(32, 3))
# now model.output_shape == (None, 6, 32)
```
__引数__
- __filters__: 整数,使用するカーネルの数を指定(出力の次元).
- __kernel_size__: 整数,または一つの整数からなるタプル/リスト.1次元畳み込みのウィンドウ長を指定します.
- __strides__: 整数,または一つの整数からなるタプル/リスト.畳み込みのストライド長を指定します.dilation_rate value != 1 とすると,strides value != 1を指定することはできません.
- __padding__: 現在`"valid"`(大文字,小文字は区別されない)のみサポートされます.将来`"same"`がサポートされる予定です.
- __activation__: 使用する活性化関数の名前([activations](../activations.md)を参照).指定がない場合,活性化は適用されない(つまり"線形"活性`a(x) = x`となる).
- __use_bias__: 真理値で,バイアスベクトルを加えるかどうかを指定します.
- __kernel_initializer__: カーネルの重み行列の初期値を指定します.([initializers](../initializers.md)を参照)
- __bias_initializer__: バイアスベクトルの初期値を指定します.([initializers](../initializers.md)を参照).
- __kernel_regularizer__: カーネルの重み行列に適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __bias_regularizer__: バイアスベクトルに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __activity_regularizer__: ネットワーク出力(同ネットワークの「活性化」)に適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __kernel_constraint__: カーネルの重み行列に適用させるConstraintsを指定します.([constraints](../constraints.md)を参照)
- __bias_constraint__: バイアスベクトルに適用させるConstraintsを指定します.([constraints](../constraints.md)を参照)
__入力のshape__
入力は`(batch_size, steps, input_dim)`の3階テンソルです.
__出力のshape__
出力は`(batch_size, new_steps, filters)`の3階テンソルです.
`steps`値はパディングやストライドにより変わることがあります.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/local.py#L179)</span>
### LocallyConnected2D
```python
keras.layers.local.LocallyConnected2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
2次元入力に対応したLocally-connectedレイヤーです.
`LocallyConnected2D`は`Conv2D`と似た動作をします.しかし,重みが共有されない,つまり入力のパッチごとに異なるフィルタが適用される点が違います.
__例__
```python
# apply a 3x3 unshared weights convolution with 64 output filters on a 32x32 image
# with `data_format="channels_last"`:
model = Sequential()
model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
# now model.output_shape == (None, 30, 30, 64)
# notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64 parameters
# add a 3x3 unshared weights convolution on top, with 32 output filters:
model.add(LocallyConnected2D(32, (3, 3)))
# now model.output_shape == (None, 28, 28, 32)
```
__引数__
- __filters__: 整数,使用するカーネルの数を指定(出力の次元).
- __kernel_size__: 畳み込みカーネルの幅と高さを指定します.タプル/リストでカーネルの幅と高さをそれぞれ指定でき,整数の場合は正方形のカーネルになります.
- __strides__: カーネルのストライドを指定します. 二つの整数からなるタプル/リストで縦と横のストライドをそれぞれ指定でき,整数の場合は幅と高さが同一のストライドになります.
- __padding__: 現在`"valid"`(大文字,小文字は区別されない)のみサポートされます.将来`"same"`がサポートされる予定です.
- __data_format__: `channels_last`(デフォルト)か`channels_first`を指定します.`channels_last`の場合,入力のshapeは`(batch, height, width, channels)`となり,`channels_first`の場合は`(batch, channels, height, width)`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,`channels_last`になります.
- __activation__: 使用する活性化関数の名前([activations](../activations.md)を参照).
指定がない場合,活性化は適用されない(つまり"線形"活性`a(x) = x`となる).
- __use_bias__: 真理値で,バイアスベクトルを加えるかどうかを指定します.
- __kernel_initializer__: カーネルの重み行列の初期値を指定します.([initializers](../initializers.md)を参照)
- __bias_initializer__: バイアスベクトルの初期値を指定します.([initializers](../initializers.md)を参照).
- __kernel_regularizer__: カーネルの重み行列に適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __bias_regularizer__: バイアスベクトルに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __activity_regularizer__: ネットワーク出力(同ネットワークの「活性化」)に適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __kernel_constraint__: カーネルの重み行列に適用させるConstraintsを指定します.([constraints](../constraints.md)を参照)
- __bias_constraint__: バイアスベクトルに適用させるConstraintsを指定します.([constraints](../constraints.md)を参照)
__入力のshape__
`data_format='channels_first'`の場合,入力は`(samples, channels, rows, cols)`の4階テンソルです.
`data_format='channels_last'`の場合,入力は`(samples, rows, cols, channels)`の4階テンソルです.
__出力のshape__
`data_format='channels_first'`の場合,出力は`(samples, filters, new_rows, new_cols)`の4階テンソルです.
`data_format='channels_last'`の場合,出力は`(samples, new_rows, new_cols, filters)`の4階テンソルです.
`rows`と`cols`値はパディングにより変わることがあります.
| keras-docs-ja/sources/layers/local.md/0 | {
"file_path": "keras-docs-ja/sources/layers/local.md",
"repo_id": "keras-docs-ja",
"token_count": 3528
} | 86 |
## Text Preprocessing
## Tokenizer
```python
keras.preprocessing.text.Tokenizer(num_words=None, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' ', char_level=False, oov_token=None, document_count=0)
```
A utility class for tokenizing text.
This class vectorizes a text corpus by turning each text into either a sequence of integers (each integer being the index of a token in a dictionary) or into a vector whose coefficient for each token can be binary, based on word counts, tf-idf, and so on.
In other words, it is a class for vectorizing texts and/or turning them into sequences (= lists of word indices, where the word of rank i in the dataset, starting from 1, has index i).
__Arguments__
- __num_words__: the maximum number of words to keep, based on word frequency. In general, only the most common `num_words-1` words are kept.
- __filters__: a string where each element is a character that will be filtered from the texts. The default is all punctuation, plus tabs and line breaks, minus the `'` character.
- __lower__: boolean. Whether to convert the texts to lowercase.
- __split__: str. Separator for word splitting.
- __char_level__: if True, every character will be treated as a token.
- __oov_token__: if given, it will be added to the word index and used to replace out-of-vocabulary words when `text_to_sequence` is called.
By default all punctuation is removed, turning the texts into space-separated sequences of words (words may contain the `'` character). These sequences are then split into lists of tokens, which will then be indexed or vectorized.
`0` is a reserved index that will not be assigned to any word.
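A minimal usage sketch (illustrative; the sample sentences and the printed index values are made up):
```python
from keras.preprocessing.text import Tokenizer
texts = ['The cat sat on the mat.', 'The dog ate my homework.']
tokenizer = Tokenizer(num_words=100)
tokenizer.fit_on_texts(texts)                              # build the word index
sequences = tokenizer.texts_to_sequences(texts)            # lists of word indices
matrix = tokenizer.texts_to_matrix(texts, mode='binary')   # shape (2, 100)
print(tokenizer.word_index)                                # e.g. {'the': 1, 'cat': 2, ...}
```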
---
## hashing_trick
```python
keras.preprocessing.text.hashing_trick(text, n, hash_function=None, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' ')
```
Converts a text to a sequence of indices in a fixed-size hashing space.
__Arguments__
- __text__: input text (string).
- __n__: dimension of the hashing space.
- __hash_function__: defaults to the python `hash` function; it can be 'md5' or any function that takes a string and returns an integer. Note that 'hash' is not a stable hashing function, so it is not consistent across runs, while 'md5' is a stable hashing function.
- __filters__: list (or concatenation) of characters to filter out, such as punctuation. Default: `!"#$%&()*+,-./:;<=>?@[\]^_`{|}~\t\n`, which includes basic punctuation, tabs, and newlines.
- __lower__: boolean. Whether to convert the text to lowercase.
- __split__: str. Separator for word splitting.
__Returns__
A list of integer word indices (uniqueness is not guaranteed).
`0` is a reserved index that will not be assigned to any word.
Two or more words may be assigned to the same index due to possible collisions by the hashing function. The [probability](https://en.wikipedia.org/wiki/Birthday_problem#Probability_table) of a collision depends on the dimension of the hashing space and the number of distinct objects.
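A short usage sketch (illustrative; the exact indices depend on the hashing function):
```python
from keras.preprocessing.text import hashing_trick
# Map the words of a sentence into a hashing space of 50 buckets.
# Using 'md5' keeps the indices stable across Python runs.
indices = hashing_trick('The quick brown fox', n=50, hash_function='md5')
print(indices)  # e.g. [14, 7, 31, 9]
```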
---
## one_hot
```python
keras.preprocessing.text.one_hot(text, n, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' ')
```
One-hot encodes a text into a list of word indices with a vocabulary of size n.
This is a wrapper around `hashing_trick` that uses `hash` as the hashing function; uniqueness of words is not guaranteed.
__Arguments__
- __text__: input text (string).
- __n__: int. Size of the vocabulary.
- __filters__: list (or concatenation) of characters to filter out, such as punctuation. Default: `!"#$%&()*+,-./:;<=>?@[\]^_`{|}~\t\n`, which includes basic punctuation, tabs, and newlines.
- __lower__: boolean. Whether to convert the text to lowercase.
- __split__: str. Separator for word splitting.
__Returns__
A list of integers in [1, n]. Each integer encodes a word (uniqueness is not guaranteed).
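A short usage sketch (illustrative; the exact values depend on the hash):
```python
from keras.preprocessing.text import one_hot
# Encode a sentence as word indices in a vocabulary of size 50.
# Indices are not guaranteed to be unique (hash collisions are possible).
encoded = one_hot('The quick brown fox', n=50)
print(encoded)  # e.g. [27, 4, 33, 15]
```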
---
## text_to_word_sequence
```python
keras.preprocessing.text.text_to_word_sequence(text, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' ')
```
Splits a text into a list of words (or tokens).
__Arguments__
- __text__: input text (string).
- __filters__: list (or concatenation) of characters to filter out, such as punctuation. Default: `!"#$%&()*+,-./:;<=>?@[\]^_`{|}~\t\n`, which includes basic punctuation, tabs, and newlines.
- __lower__: boolean. Whether to convert the text to lowercase.
- __split__: str. Separator for word splitting.
__Returns__
A list of words (or tokens). | keras-docs-ja/sources/preprocessing/text.md/0 | {
"file_path": "keras-docs-ja/sources/preprocessing/text.md",
"repo_id": "keras-docs-ja",
"token_count": 2691
} | 87 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L19)</span>
### LeakyReLU
```python
keras.layers.LeakyReLU(alpha=0.3)
```
Leaky version of the Rectified Linear Unit activation function.
It allows a small gradient when the unit is not active.
Leaky ReLU follows:
- `f(x) = alpha * x` for `x < 0`
- `f(x) = x` for `x >= 0`
__Input shape__
Arbitrary. When using `LeakyReLU` as the first layer in a model, provide the keyword argument `input_shape` (a tuple of integers, not including the samples axis).
__Output shape__
Same shape as the input.
__Arguments__
- __alpha__: non-negative `float`. Negative slope coefficient for `x < 0`.
__References__
- [Rectifier Nonlinearities Improve Neural Network Acoustic Models](
https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf)
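A minimal usage sketch (illustrative):
```python
from keras.models import Sequential
from keras.layers import Dense, LeakyReLU
model = Sequential()
model.add(Dense(64, input_shape=(20,)))
# Keep a small gradient (slope 0.1) for negative pre-activations.
model.add(LeakyReLU(alpha=0.1))
```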
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L59)</span>
### PReLU
```python
keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None)
```
Parametric Rectified Linear Unit activation function.
PReLU follows:
- `f(x) = alpha * x` for `x < 0`
- `f(x) = x` for `x >= 0`
`alpha` is a learned array with the same shape as x.
__Input shape__
Arbitrary. When using `PReLU` as the first layer in a model, provide the keyword argument `input_shape` (a tuple of integers, not including the samples axis).
__Output shape__
Same shape as the input.
__Arguments__
- __alpha_initializer__: initializer function for the weights.
- __alpha_regularizer__: regularizer for the weights.
- __alpha_constraint__: constraint for the weights.
- __shared_axes__: the axes along which to share learnable parameters for the activation function. For example, if the incoming feature maps are from a 2D convolution with output shape `(batch, height, width, channels)`, you can set `shared_axes=[1, 2]` so that each filter shares a single set of parameters (see the example below).
__References__
- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on
ImageNet Classification](https://arxiv.org/abs/1502.01852)
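A minimal usage sketch (illustrative), sharing the learned `alpha` across the spatial axes so that each filter has a single parameter set:
```python
from keras.models import Sequential
from keras.layers import Conv2D, PReLU
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3)))
# alpha is shared over height and width (axes 1 and 2): one alpha per filter.
model.add(PReLU(shared_axes=[1, 2]))
```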
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L153)</span>
### ELU
```python
keras.layers.ELU(alpha=1.0)
```
Exponential Linear Unit activation function.
ELU follows:
- `f(x) = alpha * (exp(x) - 1)` for `x < 0`
- `f(x) = x` for `x >= 0`
__Input shape__
Arbitrary. When using `ELU` as the first layer in a model, provide the keyword argument `input_shape` (a tuple of integers, not including the samples axis).
__Output shape__
Same shape as the input.
__Arguments__
- __alpha__: scale factor for the negative part (`x < 0`).
__References__
- [Fast and Accurate Deep Network Learning by Exponential Linear Units
(ELUs)](https://arxiv.org/abs/1511.07289v1)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L193)</span>
### ThresholdedReLU
```python
keras.layers.ThresholdedReLU(theta=1.0)
```
Thresholded Rectified Linear Unit activation function.
Thresholded ReLU follows:
- `f(x) = x` for `x > theta`
- `f(x) = 0` for `x <= theta`
__Input shape__
Arbitrary. When using `ThresholdedReLU` as the first layer in a model, provide the keyword argument `input_shape` (a tuple of integers, not including the samples axis).
__Output shape__
Same shape as the input.
__Arguments__
- __theta__: non-negative `float`. Threshold location of activation.
__References__
- [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](
https://arxiv.org/abs/1402.3337)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L233)</span>
### Softmax
```python
keras.layers.Softmax(axis=-1)
```
Softmax activation function.
__Input shape__
Arbitrary. When using `Softmax` as the first layer in a model, provide the keyword argument `input_shape` (a tuple of integers, not including the samples axis).
__Output shape__
Same shape as the input.
__Arguments__
- __axis__: `int`. Axis along which the softmax normalization is applied.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L265)</span>
### ReLU
```python
keras.layers.ReLU(max_value=None, negative_slope=0.0, threshold=0.0)
```
Rectified Linear Unit activation function.
With default values, it returns the element-wise `max(x, 0)`.
With other arguments, it follows:
- `f(x) = max_value` for `x >= max_value`
- `f(x) = x` for `threshold <= x < max_value`
- `f(x) = negative_slope * (x - threshold)` otherwise.
__Input shape__
Arbitrary. When using `ReLU` as the first layer in a model, provide the keyword argument `input_shape` (a tuple of integers, not including the samples axis).
__Output shape__
Same shape as the input.
__Arguments__
- __max_value__: non-negative `float`. Maximum activation value.
- __negative_slope__: non-negative `float`. Negative slope coefficient for `x < 0`.
- __threshold__: `float`. Threshold value for thresholded activation.
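A minimal usage sketch (illustrative), using `max_value` to build the commonly used clipped activation "ReLU6":
```python
from keras.models import Sequential
from keras.layers import Dense, ReLU
model = Sequential()
model.add(Dense(64, input_shape=(20,)))
# Clipped ReLU: f(x) = min(max(0, x), 6), often called ReLU6.
model.add(ReLU(max_value=6.0))
```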
| keras-docs-ko/sources/layers/advanced-activations.md/0 | {
"file_path": "keras-docs-ko/sources/layers/advanced-activations.md",
"repo_id": "keras-docs-ko",
"token_count": 4003
} | 88 |
# Train a memory network on the bAbI dataset.
References:
- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,
["Towards AI-Complete Question Answering:
A Set of Prerequisite Toy Tasks"](http://arxiv.org/abs/1502.05698)
- Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
["End-To-End Memory Networks"](http://arxiv.org/abs/1503.08895)
Reaches 98.6% accuracy on task 'single_supporting_fact_10k' after 120 epochs.
Time per epoch: 3s on CPU (core i7).
```python
from __future__ import print_function
from keras.models import Sequential, Model
from keras.layers.embeddings import Embedding
from keras.layers import Input, Activation, Dense, Permute, Dropout
from keras.layers import add, dot, concatenate
from keras.layers import LSTM
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from functools import reduce
import tarfile
import numpy as np
import re
def tokenize(sent):
'''返回包含标点符号的句子的标记。
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
return [x.strip() for x in re.split(r'(\W+)?', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
'''解析 bAbi 任务格式中提供的故事
如果 only_supporting 为 true,
则只保留支持答案的句子。
'''
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
if only_supporting:
# 只选择相关的子故事
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# 提供所有子故事
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
'''给定文件名,读取文件,检索故事,
然后将句子转换为一个独立故事。
如果提供了 max_length,
任何长于 max_length 的故事都将被丢弃。
'''
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
data = [(flatten(story), q, answer) for story, q, answer in data
if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(data):
inputs, queries, answers = [], [], []
for story, query, answer in data:
inputs.append([word_idx[w] for w in story])
queries.append([word_idx[w] for w in query])
answers.append(word_idx[answer])
return (pad_sequences(inputs, maxlen=story_maxlen),
pad_sequences(queries, maxlen=query_maxlen),
np.array(answers))
try:
path = get_file('babi-tasks-v1-2.tar.gz',
origin='https://s3.amazonaws.com/text-datasets/'
'babi_tasks_1-20_v1-2.tar.gz')
except:
print('Error downloading dataset, please download it manually:\n'
'$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2'
'.tar.gz\n'
'$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
raise
challenges = {
# QA1 任务,10,000 样本
'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_'
'single-supporting-fact_{}.txt',
# QA2 任务,1000 样本
'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_'
'two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
challenge = challenges[challenge_type]
print('Extracting stories for the challenge:', challenge_type)
with tarfile.open(path) as tar:
train_stories = get_stories(tar.extractfile(challenge.format('train')))
test_stories = get_stories(tar.extractfile(challenge.format('test')))
vocab = set()
for story, q, answer in train_stories + test_stories:
vocab |= set(story + q + [answer])
vocab = sorted(vocab)
# 保留 0 以留作 pad_sequences 进行 masking
vocab_size = len(vocab) + 1
story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))
query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))
print('-')
print('Vocab size:', vocab_size, 'unique words')
print('Story max length:', story_maxlen, 'words')
print('Query max length:', query_maxlen, 'words')
print('Number of training stories:', len(train_stories))
print('Number of test stories:', len(test_stories))
print('-')
print('Here\'s what a "story" tuple looks like (input, query, answer):')
print(train_stories[0])
print('-')
print('Vectorizing the word sequences...')
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
inputs_train, queries_train, answers_train = vectorize_stories(train_stories)
inputs_test, queries_test, answers_test = vectorize_stories(test_stories)
print('-')
print('inputs: integer tensor of shape (samples, max_length)')
print('inputs_train shape:', inputs_train.shape)
print('inputs_test shape:', inputs_test.shape)
print('-')
print('queries: integer tensor of shape (samples, max_length)')
print('queries_train shape:', queries_train.shape)
print('queries_test shape:', queries_test.shape)
print('-')
print('answers: binary (1 or 0) tensor of shape (samples, vocab_size)')
print('answers_train shape:', answers_train.shape)
print('answers_test shape:', answers_test.shape)
print('-')
print('Compiling...')
# 占位符
input_sequence = Input((story_maxlen,))
question = Input((query_maxlen,))
# 编码器
# 将输入序列编码为向量的序列
input_encoder_m = Sequential()
input_encoder_m.add(Embedding(input_dim=vocab_size,
output_dim=64))
input_encoder_m.add(Dropout(0.3))
# 输出: (samples, story_maxlen, embedding_dim)
# 将输入编码为的向量的序列(向量尺寸为 query_maxlen)
input_encoder_c = Sequential()
input_encoder_c.add(Embedding(input_dim=vocab_size,
output_dim=query_maxlen))
input_encoder_c.add(Dropout(0.3))
# 输出: (samples, story_maxlen, query_maxlen)
# 将问题编码为向量的序列
question_encoder = Sequential()
question_encoder.add(Embedding(input_dim=vocab_size,
output_dim=64,
input_length=query_maxlen))
question_encoder.add(Dropout(0.3))
# 输出: (samples, query_maxlen, embedding_dim)
# 编码输入序列和问题(均已索引化)为密集向量的序列
input_encoded_m = input_encoder_m(input_sequence)
input_encoded_c = input_encoder_c(input_sequence)
question_encoded = question_encoder(question)
# 计算第一个输入向量和问题向量序列的『匹配』('match')
# 尺寸: `(samples, story_maxlen, query_maxlen)`
match = dot([input_encoded_m, question_encoded], axes=(2, 2))
match = Activation('softmax')(match)
# 将匹配矩阵与第二个输入向量序列相加
response = add([match, input_encoded_c]) # (samples, story_maxlen, query_maxlen)
response = Permute((2, 1))(response) # (samples, query_maxlen, story_maxlen)
# 拼接匹配矩阵和问题向量序列
answer = concatenate([response, question_encoded])
# 原始论文使用一个矩阵乘法来进行归约操作。
# 我们在此选择使用 RNN。
answer = LSTM(32)(answer) # (samples, 32)
# 一个正则化层 - 可能还需要更多层
answer = Dropout(0.3)(answer)
answer = Dense(vocab_size)(answer) # (samples, vocab_size)
# 输出词汇表的一个概率分布
answer = Activation('softmax')(answer)
# 构建最终模型
model = Model([input_sequence, question], answer)
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# 训练
model.fit([inputs_train, queries_train], answers_train,
batch_size=32,
epochs=120,
validation_data=([inputs_test, queries_test], answers_test))
```
| keras-docs-zh/sources/examples/babi_memnn.md/0 | {
"file_path": "keras-docs-zh/sources/examples/babi_memnn.md",
"repo_id": "keras-docs-zh",
"token_count": 3917
} | 89 |
# Restore a character-level sequence-to-sequence model and use it to generate predictions.
This script loads the ```s2s.h5``` model saved by [lstm_seq2seq.py](/examples/lstm_seq2seq/) and generates sequences from it.
It assumes that nothing has changed (for example, ```latent_dim```, the input data, and the model architecture are all unchanged).
See [lstm_seq2seq.py](/examples/lstm_seq2seq/) for details on the model architecture and how to train it.
```python
from __future__ import print_function
from keras.models import Model, load_model
from keras.layers import Input
import numpy as np
batch_size = 64 # 训练批次大小。
epochs = 100 # 训练轮次数。
latent_dim = 256 # 编码空间隐层维度。
num_samples = 10000 # 训练样本数。
# 磁盘中数据文件路径。
data_path = 'fra-eng/fra.txt'
# 向量化数据。使用与训练脚本相同的方法。
# 注意: 数据必须相同,以使字符->整数映射保持一致。
# 我们省略对 target_texts 的编码,因为不需要它们。
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
lines = f.read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
input_text, target_text = line.split('\t')
# 我们使用 "tab" 作为目标的 "开始序列" 字符,并使用 "\n" 作为 "结束序列" 字符。
target_text = '\t' + target_text + '\n'
input_texts.append(input_text)
target_texts.append(target_text)
for char in input_text:
if char not in input_characters:
input_characters.add(char)
for char in target_text:
if char not in target_characters:
target_characters.add(char)
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
input_token_index = dict(
[(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
[(char, i) for i, char in enumerate(target_characters)])
encoder_input_data = np.zeros(
(len(input_texts), max_encoder_seq_length, num_encoder_tokens),
dtype='float32')
for i, input_text in enumerate(input_texts):
for t, char in enumerate(input_text):
encoder_input_data[i, t, input_token_index[char]] = 1.
# 恢复模型并构造编码器和解码器。
model = load_model('s2s.h5')
encoder_inputs = model.input[0] # input_1
encoder_outputs, state_h_enc, state_c_enc = model.layers[2].output # lstm_1
encoder_states = [state_h_enc, state_c_enc]
encoder_model = Model(encoder_inputs, encoder_states)
decoder_inputs = model.input[1] # input_2
decoder_state_input_h = Input(shape=(latent_dim,), name='input_3')
decoder_state_input_c = Input(shape=(latent_dim,), name='input_4')
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_lstm = model.layers[3]
decoder_outputs, state_h_dec, state_c_dec = decoder_lstm(
decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h_dec, state_c_dec]
decoder_dense = model.layers[4]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
# 反向查询 token 索引可将序列解码回可读的内容。
reverse_input_char_index = dict(
(i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
(i, char) for char, i in target_token_index.items())
# 解码输入序列。未来的工作应支持波束搜索。
def decode_sequence(input_seq):
# 将输入编码为状态向量。
states_value = encoder_model.predict(input_seq)
# 生成长度为 1 的空目标序列。
target_seq = np.zeros((1, 1, num_decoder_tokens))
# 用起始字符填充目标序列的第一个字符。
target_seq[0, 0, target_token_index['\t']] = 1.
# 一批序列的采样循环
# (为了简化,这里我们假设一批大小为 1)。
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict(
[target_seq] + states_value)
# 采样一个 token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = reverse_target_char_index[sampled_token_index]
decoded_sentence += sampled_char
# 退出条件:达到最大长度或找到停止符。
if (sampled_char == '\n' or
len(decoded_sentence) > max_decoder_seq_length):
stop_condition = True
# 更新目标序列(长度为 1)。
target_seq = np.zeros((1, 1, num_decoder_tokens))
target_seq[0, 0, sampled_token_index] = 1.
# 更新状态
states_value = [h, c]
return decoded_sentence
for seq_index in range(100):
# 抽取一个序列(训练集的一部分)进行解码。
input_seq = encoder_input_data[seq_index: seq_index + 1]
decoded_sentence = decode_sequence(input_seq)
print('-')
print('Input sentence:', input_texts[seq_index])
print('Decoded sentence:', decoded_sentence)
``` | keras-docs-zh/sources/examples/lstm_seq2seq_restore.md/0 | {
"file_path": "keras-docs-zh/sources/examples/lstm_seq2seq_restore.md",
"repo_id": "keras-docs-zh",
"token_count": 2698
} | 90 |
# Loading pre-trained word embeddings
This script loads pre-trained word embeddings (GloVe embeddings) into a frozen Keras Embedding layer, and uses it to train a text classification model on the 20 Newsgroups dataset (classifying newsgroup messages into 20 different categories).
GloVe embedding data can be found at:
http://nlp.stanford.edu/data/glove.6B.zip
(source page: http://nlp.stanford.edu/projects/glove/)
The 20 Newsgroups data can be found at:
http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html
```python
from __future__ import print_function
import os
import sys
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
from keras.initializers import Constant
BASE_DIR = ''
GLOVE_DIR = os.path.join(BASE_DIR, 'glove.6B')
TEXT_DATA_DIR = os.path.join(BASE_DIR, '20_newsgroup')
MAX_SEQUENCE_LENGTH = 1000
MAX_NUM_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
# 首先,在嵌入集中将索引映射词构建为其嵌入向量
print('Indexing word vectors.')
embeddings_index = {}
with open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt')) as f:
for line in f:
word, coefs = line.split(maxsplit=1)
coefs = np.fromstring(coefs, 'f', sep=' ')
embeddings_index[word] = coefs
print('Found %s word vectors.' % len(embeddings_index))
# 其次,准备文本样本及其标签
print('Processing text dataset')
texts = [] # 文字样本列表
labels_index = {} # 将标签名称映射到数字 ID 的字典
labels = [] # 标签 ID 列表
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path, fname)
args = {} if sys.version_info < (3,) else {'encoding': 'latin-1'}
with open(fpath, **args) as f:
t = f.read()
                    i = t.find('\n\n')  # skip header
if 0 < i:
t = t[i:]
texts.append(t)
labels.append(label_id)
print('Found %s texts.' % len(texts))
# Finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# Split the data into a training set and a validation set
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
num_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-num_validation_samples]
y_train = labels[:-num_validation_samples]
x_val = data[-num_validation_samples:]
y_val = labels[-num_validation_samples:]
print('Preparing embedding matrix.')
# Prepare the embedding matrix
num_words = min(MAX_NUM_WORDS, len(word_index) + 1)
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
for word, i in word_index.items():
if i >= MAX_NUM_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
        # Words not found in the embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# Load the pre-trained word embeddings into an Embedding layer
# Note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
embeddings_initializer=Constant(embedding_matrix),
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
print('Training model.')
# Train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = GlobalMaxPooling1D()(x)
x = Dense(128, activation='relu')(x)
preds = Dense(len(labels_index), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
model.fit(x_train, y_train,
batch_size=128,
epochs=10,
validation_data=(x_val, y_val))
``` | keras-docs-zh/sources/examples/pretrained_word_embeddings.md/0 | {
"file_path": "keras-docs-zh/sources/examples/pretrained_word_embeddings.md",
"repo_id": "keras-docs-zh",
"token_count": 2464
} | 91 |
# Keras: The Python Deep Learning library
<img src='https://s3.amazonaws.com/keras.io/img/keras-logo-2018-large-1200.png' style='max-width: 600px;'>
## You have just found Keras.
Keras is a high-level neural networks API, written in Python and capable of running on top of [TensorFlow](https://github.com/tensorflow/tensorflow), [CNTK](https://github.com/Microsoft/cntk), or [Theano](https://github.com/Theano/Theano). It was developed with a focus on enabling fast experimentation. *Being able to go from idea to result with the least possible delay is key to doing good research.*
Use Keras if you need a deep learning library that:
- Allows for easy and fast prototyping (through user friendliness, high modularity, and extensibility).
- Supports both convolutional networks and recurrent networks, as well as combinations of the two.
- Runs seamlessly on CPU and GPU.
Read the documentation at [Keras.io](https://keras.io/zh/). Mirror site: [Keras-zh](https://keras-zh.readthedocs.io/).
Keras is compatible with: __Python 2.7-3.6__.
------------------
## Multi-backend Keras and tf.keras:
**At this time, we recommend that Keras users who use multi-backend Keras with the TensorFlow backend switch to `tf.keras` in TensorFlow 2.0.**
`tf.keras` is better maintained and has better integration with TensorFlow features (eager execution, distribution support and others).
Keras 2.2.5 was the last release of Keras implementing the 2.2.* API. It was the last release to only support TensorFlow 1 (as well as Theano and CNTK).
The current release is Keras 2.3.0, which makes significant API changes and adds support for TensorFlow 2.0. The 2.3.0 release will be the last major release of multi-backend Keras. Multi-backend Keras is superseded by `tf.keras`.
Bugs present in multi-backend Keras will only be fixed until April 2020 (as part of minor releases).
For more information about the future of Keras, see [the Keras meeting notes](http://bit.ly/keras-meeting-notes).
## Guiding principles
- __User friendliness.__ Keras is an API designed for human beings, not machines. It puts user experience front and center. Keras follows best practices for reducing cognitive load: it offers consistent and simple APIs, it minimizes the number of user actions required for common use cases, and it provides clear and actionable feedback upon user error.
- __Modularity.__ A model is understood as a sequence or a graph of standalone, fully configurable modules that can be plugged together with as few restrictions as possible. In particular, neural layers, loss functions, optimizers, initialization schemes, activation functions, and regularization schemes are all standalone modules that you can combine to create new models.
- __Easy extensibility.__ New modules are simple to add (as new classes and functions), and existing modules provide ample examples. Being able to easily create new modules allows for total expressiveness, making Keras suitable for advanced research.
- __Work with Python.__ There are no separate model configuration files in a declarative format. Models are described in Python code, which is compact, easier to debug, and allows for ease of extensibility.
------------------
## Getting started: 30 seconds to Keras
The core data structure of Keras is a __model__, a way to organize layers. The simplest type of model is the [Sequential model](/getting-started/sequential-model-guide), a linear stack of layers. For more complex architectures, you should use the [Keras functional API](/getting-started/functional-api-guide), which allows you to build arbitrary graphs of layers.
Here is the `Sequential` model:
```python
from keras.models import Sequential
model = Sequential()
```
Stacking layers is as easy as `.add()`:
```python
from keras.layers import Dense
model.add(Dense(units=64, activation='relu', input_dim=100))
model.add(Dense(units=10, activation='softmax'))
```
Once your model looks good, configure its learning process with `.compile()`:
```python
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
```
If you need to, you can further configure your optimizer. A core principle of Keras is to make things reasonably simple, while allowing the user to be fully in control when they need to (the ultimate control being the easy extensibility of the source code).
```python
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True))
```
You can now iterate on your training data in batches:
```python
# x_train and y_train are Numpy arrays -- just like in the Scikit-Learn API.
model.fit(x_train, y_train, epochs=5, batch_size=32)
```
Alternatively, you can feed batches to your model manually:
```python
model.train_on_batch(x_batch, y_batch)
```
Evaluate your performance in one line:
```python
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
```
Or generate predictions on new data:
```python
classes = model.predict(x_test, batch_size=128)
```
Building a question answering system, an image classification model, a Neural Turing Machine, or any other model is just as fast. The ideas behind deep learning are simple, so why should their implementation be painful?
For a more in-depth tutorial about Keras, you can check out:
- [Getting started with the Sequential model](/getting-started/sequential-model-guide)
- [Getting started with the functional API](/getting-started/functional-api-guide)
In the [examples](https://github.com/keras-team/keras/tree/master/examples) folder of the repository, you will find more advanced models: question answering with memory networks, text generation with stacked LSTMs, and more.
------------------
## Installation
Before installing Keras, please install one of its backend engines: TensorFlow, Theano, or CNTK. We recommend the TensorFlow backend.
- [TensorFlow installation instructions](https://www.tensorflow.org/install/).
- [Theano installation instructions](http://deeplearning.net/software/theano/install.html#install).
- [CNTK installation instructions](https://docs.microsoft.com/en-us/cognitive-toolkit/setup-cntk-on-your-machine).
You may also consider installing the following **optional dependencies**:
- [cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) (recommended if you plan on running Keras on GPU).
- HDF5 and [h5py](http://docs.h5py.org/en/latest/build.html) (required if you plan on saving Keras models to disk).
- [graphviz](https://graphviz.gitlab.io/download/) and [pydot](https://github.com/erocarrera/pydot) (used by the [visualization utilities](https://keras.io/zh/visualization/) to plot model graphs).
Then, you can install Keras itself. There are two ways to install Keras:
- **Install Keras from PyPI (recommended):**
Note: these installation steps assume that you are on a Linux or Mac environment.
If you are on Windows, you will need to remove `sudo` to run the commands below.
```sh
sudo pip install keras
```
If you are using a virtualenv, you may want to avoid using sudo:
```sh
pip install keras
```
- **Alternatively: install Keras from the GitHub source:**
First, clone Keras using `git`:
```sh
git clone https://github.com/keras-team/keras.git
```
Then, `cd` to the Keras folder and run the install command:
```sh
cd keras
sudo python setup.py install
```
------------------
## Configuring your Keras backend
By default, Keras will use TensorFlow as its tensor manipulation library. [Follow these instructions](https://keras.io/zh/backend/) to configure the Keras backend.
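As a quick, illustrative sketch (the linked guide is the authoritative reference), the active backend can be inspected from Python, and a different backend can be selected for a session through the `KERAS_BACKEND` environment variable, set before Keras is imported:
```python
import os

# Must be set before Keras is imported for it to take effect.
os.environ["KERAS_BACKEND"] = "theano"

from keras import backend as K
print(K.backend())  # prints the name of the active backend, e.g. "theano"
```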
------------------
## Support
You can ask questions and join the development discussion:
- On the [Keras Google group](https://groups.google.com/forum/#!forum/keras-users).
- On the [Keras Slack channel](https://kerasteam.slack.com). Use [this link](https://keras-slack-autojoin.herokuapp.com/) to request an invitation to the channel.
- Or join the Keras deep learning QQ group, which also helps with translating the documentation; the group number is 951623081.
You can also post **bug reports and feature requests** (only) in [GitHub issues](https://github.com/keras-team/keras/issues). Make sure to read [our guidelines](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md) first.
------------------
## Why this name, Keras?
Keras (κέρας) means *horn* in Greek. It is a reference to a literary image from ancient Greek and Latin literature, first found in the *Odyssey*, where dream spirits (_Oneiroi_, singular _Oneiros_) are divided between those who deceive men with false visions, who arrive to Earth through a gate of ivory, and those who announce a future that will come to pass, who arrive through a gate of horn. It's a play on the words κέρας (horn) / κραίνω (fulfill), and ἐλέφας (ivory) / ἐλεφαίρομαι (deceive).
Keras was initially developed as part of the research effort of project ONEIROS (Open-ended Neuro-Electronic Intelligent Robot Operating System).
>_"Oneiroi are beyond our unravelling - who can be sure what tale they tell? Not all that men look for comes to pass. Two gates there are that give passage to fleeting Oneiroi; one is made of horn, one of ivory. The Oneiroi that pass through sawn ivory are deceitful, bearing a message that will not be fulfilled; those that come out through polished horn have truth behind them, to be accomplished for men who see them."_ Homer, Odyssey 19. 562 ff (Shewring translation).
------------------
| keras-docs-zh/sources/index.md/0 | {
"file_path": "keras-docs-zh/sources/index.md",
"repo_id": "keras-docs-zh",
"token_count": 5448
} | 92 |
## Usage of metrics
A metric is a function that is used to judge the performance of your model. Metric functions are supplied in the `metrics` parameter when a model is compiled.
```python
model.compile(loss='mean_squared_error',
optimizer='sgd',
metrics=['mae', 'acc'])
```
```python
from keras import metrics
model.compile(loss='mean_squared_error',
optimizer='sgd',
metrics=[metrics.mae, metrics.categorical_accuracy])
```
A metric function is similar to a [loss function](/losses), except that the results from evaluating a metric are not used when training the model.
You may pass the name of an existing metric, or pass a custom Theano/TensorFlow function to use (see [Custom metrics](#custom-metrics)).
__Arguments__
- __y_true__: True labels. Theano/TensorFlow tensor.
- __y_pred__: Predictions. Theano/TensorFlow tensor of the same shape as y_true.
__Returns__
A single tensor representing the mean of the output across all datapoints.
----
## Available metrics
### accuracy
```python
keras.metrics.accuracy(y_true, y_pred)
```
### binary_accuracy
```python
keras.metrics.binary_accuracy(y_true, y_pred, threshold=0.5)
```
----
### categorical_accuracy
```python
keras.metrics.categorical_accuracy(y_true, y_pred)
```
----
### sparse_categorical_accuracy
```python
keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
```
----
### top_k_categorical_accuracy
```python
keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)
```
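For example, a minimal sketch of using this metric with a non-default `k` (assuming a `model` has already been defined):
```python
import keras

def top_3_accuracy(y_true, y_pred):
    # Same built-in metric, but with k=3 instead of the default k=5.
    return keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)

model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy', top_3_accuracy])
```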
----
### sparse_top_k_categorical_accuracy
```python
keras.metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=5)
```
----
### cosine_proximity
```python
keras.metrics.cosine_proximity(y_true, y_pred, axis=-1)
```
----
### clone_metric
```python
keras.metrics.clone_metric(metric)
```
Returns a clone of the given metric if it is stateful, otherwise returns the metric itself.
----
### clone_metrics
```python
keras.metrics.clone_metrics(metrics)
```
Clones the given metric sequence/dict.
In addition to the metrics above, you may also use any of the loss functions described on the [losses page](/losses) as metrics.
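For example, a minimal sketch (assuming a `model` has already been defined):
```python
from keras import losses

# Any loss function can also be monitored as a metric; it simply will not
# be used to compute gradients during training.
model.compile(loss='mean_squared_error',
              optimizer='sgd',
              metrics=[losses.mean_absolute_percentage_error])
```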
----
## Custom metrics
Custom metrics can be passed at the compilation step. The function needs to take `(y_true, y_pred)` as arguments and return a single tensor value.
```python
import keras.backend as K
def mean_pred(y_true, y_pred):
return K.mean(y_pred)
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', mean_pred])
```
| keras-docs-zh/sources/metrics.md/0 | {
"file_path": "keras-docs-zh/sources/metrics.md",
"repo_id": "keras-docs-zh",
"token_count": 1391
} | 93 |
<jupyter_start><jupyter_text>Automatic Speech Recognition using CTC**Authors:** [Mohamed Reda Bouadjenek](https://rbouadjenek.github.io/) and [Ngoc Dung Huynh](https://www.linkedin.com/in/parkerhuynh/)**Date created:** 2021/09/26**Last modified:** 2021/09/26**Description:** Training a CTC-based model for automatic speech recognition. IntroductionSpeech recognition is an interdisciplinary subfield of computer scienceand computational linguistics that develops methodologies and technologiesthat enable the recognition and translation of spoken language into textby computers. It is also known as automatic speech recognition (ASR),computer speech recognition or speech to text (STT). It incorporatesknowledge and research in the computer science, linguistics and computerengineering fields.This demonstration shows how to combine a 2D CNN, RNN and a ConnectionistTemporal Classification (CTC) loss to build an ASR. CTC is an algorithmused to train deep neural networks in speech recognition, handwritingrecognition and other sequence problems. CTC is used when we don’t knowhow the input aligns with the output (how the characters in the transcriptalign to the audio). The model we create is similar to[DeepSpeech2](https://nvidia.github.io/OpenSeq2Seq/html/speech-recognition/deepspeech2.html).We will use the LJSpeech dataset from the[LibriVox](https://librivox.org/) project. It consists of shortaudio clips of a single speaker reading passages from 7 non-fiction books.We will evaluate the quality of the model using[Word Error Rate (WER)](https://en.wikipedia.org/wiki/Word_error_rate).WER is obtained by adding upthe substitutions, insertions, and deletions that occur in a sequence ofrecognized words. Divide that number by the total number of words originallyspoken. The result is the WER. To get the WER score you need to install the[jiwer](https://pypi.org/project/jiwer/) package. You can use the following command line:```pip install jiwer```**References:**- [LJSpeech Dataset](https://keithito.com/LJ-Speech-Dataset/)- [Speech recognition](https://en.wikipedia.org/wiki/Speech_recognition)- [Sequence Modeling With CTC](https://distill.pub/2017/ctc/)- [DeepSpeech2](https://nvidia.github.io/OpenSeq2Seq/html/speech-recognition/deepspeech2.html) Setup<jupyter_code>import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from IPython import display
from jiwer import wer<jupyter_output><empty_output><jupyter_text>Load the LJSpeech DatasetLet's download the [LJSpeech Dataset](https://keithito.com/LJ-Speech-Dataset/).The dataset contains 13,100 audio files as `wav` files in the `/wavs/` folder.The label (transcript) for each audio file is a stringgiven in the `metadata.csv` file. The fields are:- **ID**: this is the name of the corresponding .wav file- **Transcription**: words spoken by the reader (UTF-8)- **Normalized transcription**: transcription with numbers,ordinals, and monetary units expanded into full words (UTF-8).For this demo we will use on the "Normalized transcription" field.Each audio file is a single-channel 16-bit PCM WAV with a sample rate of 22,050 Hz.<jupyter_code>data_url = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"
data_path = keras.utils.get_file("LJSpeech-1.1", data_url, untar=True)
wavs_path = data_path + "/wavs/"
metadata_path = data_path + "/metadata.csv"
# Read metadata file and parse it
metadata_df = pd.read_csv(metadata_path, sep="|", header=None, quoting=3)
metadata_df.columns = ["file_name", "transcription", "normalized_transcription"]
metadata_df = metadata_df[["file_name", "normalized_transcription"]]
metadata_df = metadata_df.sample(frac=1).reset_index(drop=True)
metadata_df.head(3)<jupyter_output><empty_output><jupyter_text>We now split the data into training and validation set.<jupyter_code>split = int(len(metadata_df) * 0.90)
df_train = metadata_df[:split]
df_val = metadata_df[split:]
print(f"Size of the training set: {len(df_train)}")
print(f"Size of the training set: {len(df_val)}")<jupyter_output><empty_output><jupyter_text>PreprocessingWe first prepare the vocabulary to be used.<jupyter_code># The set of characters accepted in the transcription.
characters = [x for x in "abcdefghijklmnopqrstuvwxyz'?! "]
# Mapping characters to integers
char_to_num = keras.layers.StringLookup(vocabulary=characters, oov_token="")
# Mapping integers back to original characters
num_to_char = keras.layers.StringLookup(
vocabulary=char_to_num.get_vocabulary(), oov_token="", invert=True
)
print(
f"The vocabulary is: {char_to_num.get_vocabulary()} "
f"(size ={char_to_num.vocabulary_size()})"
)<jupyter_output><empty_output><jupyter_text>Next, we create the function that describes the transformation that we apply to eachelement of our dataset.<jupyter_code># An integer scalar Tensor. The window length in samples.
frame_length = 256
# An integer scalar Tensor. The number of samples to step.
frame_step = 160
# An integer scalar Tensor. The size of the FFT to apply.
# If not provided, uses the smallest power of 2 enclosing frame_length.
fft_length = 384
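# Note (added for clarity): with fft_length = 384, the STFT computed below
# produces fft_length // 2 + 1 = 193 frequency bins per frame, which is the
# `input_dim` passed to `build_model` later in this example.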
def encode_single_sample(wav_file, label):
###########################################
## Process the Audio
##########################################
# 1. Read wav file
file = tf.io.read_file(wavs_path + wav_file + ".wav")
# 2. Decode the wav file
audio, _ = tf.audio.decode_wav(file)
audio = tf.squeeze(audio, axis=-1)
# 3. Change type to float
audio = tf.cast(audio, tf.float32)
# 4. Get the spectrogram
spectrogram = tf.signal.stft(
audio, frame_length=frame_length, frame_step=frame_step, fft_length=fft_length
)
# 5. We only need the magnitude, which can be derived by applying tf.abs
spectrogram = tf.abs(spectrogram)
spectrogram = tf.math.pow(spectrogram, 0.5)
# 6. normalisation
means = tf.math.reduce_mean(spectrogram, 1, keepdims=True)
stddevs = tf.math.reduce_std(spectrogram, 1, keepdims=True)
spectrogram = (spectrogram - means) / (stddevs + 1e-10)
###########################################
## Process the label
##########################################
# 7. Convert label to Lower case
label = tf.strings.lower(label)
# 8. Split the label
label = tf.strings.unicode_split(label, input_encoding="UTF-8")
# 9. Map the characters in label to numbers
label = char_to_num(label)
# 10. Return a dict as our model is expecting two inputs
return spectrogram, label<jupyter_output><empty_output><jupyter_text>Creating `Dataset` objectsWe create a `tf.data.Dataset` object that yieldsthe transformed elements, in the same order as theyappeared in the input.<jupyter_code>batch_size = 32
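# Note (added for clarity): `padded_batch` is used below because both the
# spectrogram time axis and the encoded label length vary from sample to
# sample, so each batch must be padded to a common shape.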
# Define the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices(
(list(df_train["file_name"]), list(df_train["normalized_transcription"]))
)
train_dataset = (
train_dataset.map(encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE)
.padded_batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
# Define the validation dataset
validation_dataset = tf.data.Dataset.from_tensor_slices(
(list(df_val["file_name"]), list(df_val["normalized_transcription"]))
)
validation_dataset = (
validation_dataset.map(encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE)
.padded_batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)<jupyter_output><empty_output><jupyter_text>Visualize the dataLet's visualize an example in our dataset, including theaudio clip, the spectrogram and the corresponding label.<jupyter_code>fig = plt.figure(figsize=(8, 5))
for batch in train_dataset.take(1):
spectrogram = batch[0][0].numpy()
spectrogram = np.array([np.trim_zeros(x) for x in np.transpose(spectrogram)])
label = batch[1][0]
# Spectrogram
label = tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
ax = plt.subplot(2, 1, 1)
ax.imshow(spectrogram, vmax=1)
ax.set_title(label)
ax.axis("off")
# Wav
file = tf.io.read_file(wavs_path + list(df_train["file_name"])[0] + ".wav")
audio, _ = tf.audio.decode_wav(file)
audio = audio.numpy()
ax = plt.subplot(2, 1, 2)
plt.plot(audio)
ax.set_title("Signal Wave")
ax.set_xlim(0, len(audio))
display.display(display.Audio(np.transpose(audio), rate=16000))
plt.show()<jupyter_output><empty_output><jupyter_text>ModelWe first define the CTC Loss function.<jupyter_code>def CTCLoss(y_true, y_pred):
# Compute the training-time loss value
batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
label_length = tf.cast(tf.shape(y_true)[1], dtype="int64")
input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64")
label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64")
loss = keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
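    # Note (added for clarity): `ctc_batch_cost` expects y_true of shape
    # (batch, max_label_length), y_pred of shape (batch, time_steps, num_classes)
    # containing softmax probabilities, and the per-sample input/label lengths
    # computed above, each of shape (batch, 1).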
return loss<jupyter_output><empty_output><jupyter_text>We now define our model. We will define a model similar to[DeepSpeech2](https://nvidia.github.io/OpenSeq2Seq/html/speech-recognition/deepspeech2.html).<jupyter_code>def build_model(input_dim, output_dim, rnn_layers=5, rnn_units=128):
"""Model similar to DeepSpeech2."""
# Model's input
input_spectrogram = layers.Input((None, input_dim), name="input")
# Expand the dimension to use 2D CNN.
x = layers.Reshape((-1, input_dim, 1), name="expand_dim")(input_spectrogram)
# Convolution layer 1
x = layers.Conv2D(
filters=32,
kernel_size=[11, 41],
strides=[2, 2],
padding="same",
use_bias=False,
name="conv_1",
)(x)
x = layers.BatchNormalization(name="conv_1_bn")(x)
x = layers.ReLU(name="conv_1_relu")(x)
# Convolution layer 2
x = layers.Conv2D(
filters=32,
kernel_size=[11, 21],
strides=[1, 2],
padding="same",
use_bias=False,
name="conv_2",
)(x)
x = layers.BatchNormalization(name="conv_2_bn")(x)
x = layers.ReLU(name="conv_2_relu")(x)
# Reshape the resulted volume to feed the RNNs layers
x = layers.Reshape((-1, x.shape[-2] * x.shape[-1]))(x)
# RNN layers
for i in range(1, rnn_layers + 1):
recurrent = layers.GRU(
units=rnn_units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
return_sequences=True,
reset_after=True,
name=f"gru_{i}",
)
x = layers.Bidirectional(
recurrent, name=f"bidirectional_{i}", merge_mode="concat"
)(x)
if i < rnn_layers:
x = layers.Dropout(rate=0.5)(x)
# Dense layer
x = layers.Dense(units=rnn_units * 2, name="dense_1")(x)
x = layers.ReLU(name="dense_1_relu")(x)
x = layers.Dropout(rate=0.5)(x)
# Classification layer
output = layers.Dense(units=output_dim + 1, activation="softmax")(x)
# Model
model = keras.Model(input_spectrogram, output, name="DeepSpeech_2")
# Optimizer
opt = keras.optimizers.Adam(learning_rate=1e-4)
# Compile the model and return
model.compile(optimizer=opt, loss=CTCLoss)
return model
# Get the model
model = build_model(
input_dim=fft_length // 2 + 1,
output_dim=char_to_num.vocabulary_size(),
rnn_units=512,
)
model.summary(line_length=110)<jupyter_output><empty_output><jupyter_text>Training and Evaluating<jupyter_code># A utility function to decode the output of the network
def decode_batch_predictions(pred):
input_len = np.ones(pred.shape[0]) * pred.shape[1]
# Use greedy search. For complex tasks, you can use beam search
results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0]
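    # Illustrative alternative (not used here): a beam search decode would be
    # results = keras.backend.ctc_decode(
    #     pred, input_length=input_len, greedy=False, beam_width=100, top_paths=1
    # )[0][0]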
# Iterate over the results and get back the text
output_text = []
for result in results:
result = tf.strings.reduce_join(num_to_char(result)).numpy().decode("utf-8")
output_text.append(result)
return output_text
# A callback class to output a few transcriptions during training
class CallbackEval(keras.callbacks.Callback):
"""Displays a batch of outputs after every epoch."""
def __init__(self, dataset):
super().__init__()
self.dataset = dataset
def on_epoch_end(self, epoch: int, logs=None):
predictions = []
targets = []
for batch in self.dataset:
X, y = batch
batch_predictions = model.predict(X)
batch_predictions = decode_batch_predictions(batch_predictions)
predictions.extend(batch_predictions)
for label in y:
label = (
tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
)
targets.append(label)
wer_score = wer(targets, predictions)
print("-" * 100)
print(f"Word Error Rate: {wer_score:.4f}")
print("-" * 100)
for i in np.random.randint(0, len(predictions), 2):
print(f"Target : {targets[i]}")
print(f"Prediction: {predictions[i]}")
print("-" * 100)<jupyter_output><empty_output><jupyter_text>Let's start the training process.<jupyter_code># Define the number of epochs.
epochs = 1
# Callback function to check transcription on the val set.
validation_callback = CallbackEval(validation_dataset)
# Train the model
history = model.fit(
train_dataset,
validation_data=validation_dataset,
epochs=epochs,
callbacks=[validation_callback],
)<jupyter_output><empty_output><jupyter_text>Inference<jupyter_code># Let's check results on more validation samples
predictions = []
targets = []
for batch in validation_dataset:
X, y = batch
batch_predictions = model.predict(X)
batch_predictions = decode_batch_predictions(batch_predictions)
predictions.extend(batch_predictions)
for label in y:
label = tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
targets.append(label)
wer_score = wer(targets, predictions)
print("-" * 100)
print(f"Word Error Rate: {wer_score:.4f}")
print("-" * 100)
for i in np.random.randint(0, len(predictions), 5):
print(f"Target : {targets[i]}")
print(f"Prediction: {predictions[i]}")
print("-" * 100)<jupyter_output><empty_output> | keras-io/examples/audio/ipynb/ctc_asr.ipynb/0 | {
"file_path": "keras-io/examples/audio/ipynb/ctc_asr.ipynb",
"repo_id": "keras-io",
"token_count": 5436
} | 94 |
<jupyter_start><jupyter_text>Drug Molecule Generation with VAE**Author:** [Victor Basu](https://www.linkedin.com/in/victor-basu-520958147)**Date created:** 2022/03/10**Last modified:** 2022/03/24**Description:** Implementing a Convolutional Variational AutoEncoder (VAE) for Drug Discovery. IntroductionIn this example, we use a Variational Autoencoder to generate molecules for drug discovery.We use the research papers[Automatic chemical design using a data-driven continuous representation of molecules](https://arxiv.org/abs/1610.02415)and [MolGAN: An implicit generative model for small molecular graphs](https://arxiv.org/abs/1805.11973)as a reference.The model described in the paper **Automatic chemical design using a data-drivencontinuous representation of molecules** generates new molecules via efficient explorationof open-ended spaces of chemical compounds. The model consists ofthree components: Encoder, Decoder and Predictor. The Encoder converts the discreterepresentation of a molecule into a real-valued continuous vector, and the Decoderconverts these continuous vectors back to discrete molecule representations. ThePredictor estimates chemical properties from the latent continuous vector representationof the molecule. Continuous representations allow the use of gradient-basedoptimization to efficiently guide the search for optimized functional compounds.**Figure (a)** - A diagram of the autoencoder used for molecule design, including thejoint property prediction model. Starting from a discrete molecule representation, suchas a SMILES string, the encoder network converts each molecule into a vector in thelatent space, which is effectively a continuous molecule representation. Given a pointin the latent space, the decoder network produces a corresponding SMILES string. Amultilayer perceptron network estimates the value of target properties associated witheach molecule.**Figure (b)** - Gradient-based optimization in continuous latent space. After training asurrogate model `f(z)` to predict the properties of molecules based on their latentrepresentation `z`, we can optimize `f(z)` with respect to `z` to find new latentrepresentations expected to match specific desired properties. These new latentrepresentations can then be decoded into SMILES strings, at which point their propertiescan be tested empirically.For an explanation and implementation of MolGAN, please refer to the Keras Example[**WGAN-GP with R-GCN for the generation of small molecular graphs**](https://bit.ly/3pU6zXK) byAlexander Kensert. Many of the functions used in the present example are from the above Keras example. SetupRDKit is an open source toolkit for cheminformatics and machine learning. This toolkit come in handyif one is into drug discovery domain. In this example, RDKit is used to convenientlyand efficiently transform SMILES to molecule objects, and then from those obtain sets of atomsand bonds.Quoting from[WGAN-GP with R-GCN for the generation of small molecular graphs](https://keras.io/examples/generative/wgan-graphs/)):**"SMILES expresses the structure of a given molecule in the form of an ASCII string.The SMILES string is a compact encoding which, for smaller molecules, is relatively human-readable.Encoding molecules as a string both alleviates and facilitates database and/or web searchingof a given molecule. 
RDKit uses algorithms to accurately transform a given SMILES toa molecule object, which can then be used to compute a great number of molecular properties/features."**<jupyter_code>!pip -q install rdkit-pypi==2021.9.4
import ast
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from rdkit import Chem, RDLogger
from rdkit.Chem import BondType
from rdkit.Chem.Draw import MolsToGridImage
RDLogger.DisableLog("rdApp.*")<jupyter_output><empty_output><jupyter_text>DatasetWe use the [**ZINC – A Free Database of Commercially Available Compounds forVirtual Screening**](https://bit.ly/3IVBI4x) dataset. The dataset comes with moleculeformula in SMILE representation along with their respective molecular properties such as**logP** (water–octanal partition coefficient), **SAS** (syntheticaccessibility score) and **QED** (Qualitative Estimate of Drug-likeness).<jupyter_code>csv_path = keras.utils.get_file(
"/content/250k_rndm_zinc_drugs_clean_3.csv",
"https://raw.githubusercontent.com/aspuru-guzik-group/chemical_vae/master/models/zinc_properties/250k_rndm_zinc_drugs_clean_3.csv",
)
df = pd.read_csv("/content/250k_rndm_zinc_drugs_clean_3.csv")
df["smiles"] = df["smiles"].apply(lambda s: s.replace("\n", ""))
df.head()<jupyter_output><empty_output><jupyter_text>Hyperparameters<jupyter_code>SMILE_CHARSET = '["C", "B", "F", "I", "H", "O", "N", "S", "P", "Cl", "Br"]'
bond_mapping = {"SINGLE": 0, "DOUBLE": 1, "TRIPLE": 2, "AROMATIC": 3}
bond_mapping.update(
{0: BondType.SINGLE, 1: BondType.DOUBLE, 2: BondType.TRIPLE, 3: BondType.AROMATIC}
)
SMILE_CHARSET = ast.literal_eval(SMILE_CHARSET)
MAX_MOLSIZE = max(df["smiles"].str.len())
SMILE_to_index = dict((c, i) for i, c in enumerate(SMILE_CHARSET))
index_to_SMILE = dict((i, c) for i, c in enumerate(SMILE_CHARSET))
atom_mapping = dict(SMILE_to_index)
atom_mapping.update(index_to_SMILE)
BATCH_SIZE = 100
EPOCHS = 10
VAE_LR = 5e-4
NUM_ATOMS = 120 # Maximum number of atoms
ATOM_DIM = len(SMILE_CHARSET) # Number of atom types
BOND_DIM = 4 + 1 # Number of bond types
LATENT_DIM = 435 # Size of the latent space
def smiles_to_graph(smiles):
# Converts SMILES to molecule object
molecule = Chem.MolFromSmiles(smiles)
# Initialize adjacency and feature tensor
adjacency = np.zeros((BOND_DIM, NUM_ATOMS, NUM_ATOMS), "float32")
features = np.zeros((NUM_ATOMS, ATOM_DIM), "float32")
# loop over each atom in molecule
for atom in molecule.GetAtoms():
i = atom.GetIdx()
atom_type = atom_mapping[atom.GetSymbol()]
features[i] = np.eye(ATOM_DIM)[atom_type]
# loop over one-hop neighbors
for neighbor in atom.GetNeighbors():
j = neighbor.GetIdx()
bond = molecule.GetBondBetweenAtoms(i, j)
bond_type_idx = bond_mapping[bond.GetBondType().name]
adjacency[bond_type_idx, [i, j], [j, i]] = 1
# Where no bond, add 1 to last channel (indicating "non-bond")
# Notice: channels-first
adjacency[-1, np.sum(adjacency, axis=0) == 0] = 1
# Where no atom, add 1 to last column (indicating "non-atom")
features[np.where(np.sum(features, axis=1) == 0)[0], -1] = 1
return adjacency, features
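# Quick illustration (added for clarity): for the SMILES string "CO" (methanol),
# smiles_to_graph returns a (BOND_DIM, NUM_ATOMS, NUM_ATOMS) adjacency tensor
# with a single-bond entry recorded symmetrically between atoms 0 and 1, a
# (NUM_ATOMS, ATOM_DIM) feature tensor one-hot encoding "C" and "O", and all
# remaining positions assigned to the "non-bond"/"non-atom" channels.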
def graph_to_molecule(graph):
# Unpack graph
adjacency, features = graph
# RWMol is a molecule object intended to be edited
molecule = Chem.RWMol()
# Remove "no atoms" & atoms with no bonds
keep_idx = np.where(
(np.argmax(features, axis=1) != ATOM_DIM - 1)
& (np.sum(adjacency[:-1], axis=(0, 1)) != 0)
)[0]
features = features[keep_idx]
adjacency = adjacency[:, keep_idx, :][:, :, keep_idx]
# Add atoms to molecule
for atom_type_idx in np.argmax(features, axis=1):
atom = Chem.Atom(atom_mapping[atom_type_idx])
_ = molecule.AddAtom(atom)
# Add bonds between atoms in molecule; based on the upper triangles
# of the [symmetric] adjacency tensor
(bonds_ij, atoms_i, atoms_j) = np.where(np.triu(adjacency) == 1)
for (bond_ij, atom_i, atom_j) in zip(bonds_ij, atoms_i, atoms_j):
if atom_i == atom_j or bond_ij == BOND_DIM - 1:
continue
bond_type = bond_mapping[bond_ij]
molecule.AddBond(int(atom_i), int(atom_j), bond_type)
# Sanitize the molecule; for more information on sanitization, see
# https://www.rdkit.org/docs/RDKit_Book.html#molecular-sanitization
flag = Chem.SanitizeMol(molecule, catchErrors=True)
# Let's be strict. If sanitization fails, return None
if flag != Chem.SanitizeFlags.SANITIZE_NONE:
return None
return molecule<jupyter_output><empty_output><jupyter_text>Generate training set<jupyter_code>train_df = df.sample(frac=0.75, random_state=42) # random state is a seed value
train_df.reset_index(drop=True, inplace=True)
adjacency_tensor, feature_tensor, qed_tensor = [], [], []
for idx in range(8000):
adjacency, features = smiles_to_graph(train_df.loc[idx]["smiles"])
qed = train_df.loc[idx]["qed"]
adjacency_tensor.append(adjacency)
feature_tensor.append(features)
qed_tensor.append(qed)
adjacency_tensor = np.array(adjacency_tensor)
feature_tensor = np.array(feature_tensor)
qed_tensor = np.array(qed_tensor)
class RelationalGraphConvLayer(keras.layers.Layer):
def __init__(
self,
units=128,
activation="relu",
use_bias=False,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
**kwargs
):
super().__init__(**kwargs)
self.units = units
self.activation = keras.activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
self.bias_regularizer = keras.regularizers.get(bias_regularizer)
def build(self, input_shape):
bond_dim = input_shape[0][1]
atom_dim = input_shape[1][2]
self.kernel = self.add_weight(
shape=(bond_dim, atom_dim, self.units),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
trainable=True,
name="W",
dtype=tf.float32,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(bond_dim, 1, self.units),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
trainable=True,
name="b",
dtype=tf.float32,
)
self.built = True
def call(self, inputs, training=False):
adjacency, features = inputs
# Aggregate information from neighbors
x = tf.matmul(adjacency, features[:, None, :, :])
# Apply linear transformation
x = tf.matmul(x, self.kernel)
if self.use_bias:
x += self.bias
# Reduce bond types dim
x_reduced = tf.reduce_sum(x, axis=1)
# Apply non-linear transformation
return self.activation(x_reduced)<jupyter_output><empty_output><jupyter_text>Build the Encoder and DecoderThe Encoder takes as input a molecule's graph adjacency matrix and feature matrix.These features are processed via a Graph Convolution layer, then are flattened andprocessed by several Dense layers to derive `z_mean` and `log_var`, thelatent-space representation of the molecule.**Graph Convolution layer**: The relational graph convolution layer implementsnon-linearly transformed neighbourhood aggregations. We can define these layers asfollows:`H_hat**(l+1) = σ(D_hat**(-1) * A_hat * H_hat**(l+1) * W**(l))`Where `σ` denotes the non-linear transformation (commonly a ReLU activation), `A` theadjacency tensor, `H_hat**(l)` the feature tensor at the `l-th` layer, `D_hat**(-1)` theinverse diagonal degree tensor of `A_hat`, and `W_hat**(l)` the trainable weight tensorat the `l-th` layer. Specifically, for each bond type (relation), the degree tensorexpresses, in the diagonal, the number of bonds attached to each atom.Source:[WGAN-GP with R-GCN for the generation of small molecular graphs](https://keras.io/examples/generative/wgan-graphs/))The Decoder takes as input the latent-space representation and predictsthe graph adjacency matrix and feature matrix of the corresponding molecules.<jupyter_code>def get_encoder(
gconv_units, latent_dim, adjacency_shape, feature_shape, dense_units, dropout_rate
):
adjacency = keras.layers.Input(shape=adjacency_shape)
features = keras.layers.Input(shape=feature_shape)
# Propagate through one or more graph convolutional layers
features_transformed = features
for units in gconv_units:
features_transformed = RelationalGraphConvLayer(units)(
[adjacency, features_transformed]
)
# Reduce 2-D representation of molecule to 1-D
x = keras.layers.GlobalAveragePooling1D()(features_transformed)
# Propagate through one or more densely connected layers
for units in dense_units:
x = layers.Dense(units, activation="relu")(x)
x = layers.Dropout(dropout_rate)(x)
z_mean = layers.Dense(latent_dim, dtype="float32", name="z_mean")(x)
log_var = layers.Dense(latent_dim, dtype="float32", name="log_var")(x)
encoder = keras.Model([adjacency, features], [z_mean, log_var], name="encoder")
return encoder
def get_decoder(dense_units, dropout_rate, latent_dim, adjacency_shape, feature_shape):
latent_inputs = keras.Input(shape=(latent_dim,))
x = latent_inputs
for units in dense_units:
x = keras.layers.Dense(units, activation="tanh")(x)
x = keras.layers.Dropout(dropout_rate)(x)
# Map outputs of previous layer (x) to [continuous] adjacency tensors (x_adjacency)
x_adjacency = keras.layers.Dense(tf.math.reduce_prod(adjacency_shape))(x)
x_adjacency = keras.layers.Reshape(adjacency_shape)(x_adjacency)
# Symmetrify tensors in the last two dimensions
x_adjacency = (x_adjacency + tf.transpose(x_adjacency, (0, 1, 3, 2))) / 2
x_adjacency = keras.layers.Softmax(axis=1)(x_adjacency)
# Map outputs of previous layer (x) to [continuous] feature tensors (x_features)
x_features = keras.layers.Dense(tf.math.reduce_prod(feature_shape))(x)
x_features = keras.layers.Reshape(feature_shape)(x_features)
x_features = keras.layers.Softmax(axis=2)(x_features)
decoder = keras.Model(
latent_inputs, outputs=[x_adjacency, x_features], name="decoder"
)
return decoder<jupyter_output><empty_output><jupyter_text>Build the Sampling layer<jupyter_code>class Sampling(layers.Layer):
def call(self, inputs):
z_mean, z_log_var = inputs
batch = tf.shape(z_log_var)[0]
dim = tf.shape(z_log_var)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon<jupyter_output><empty_output><jupyter_text>Build the VAEThis model is trained to optimize four losses:* Categorical crossentropy* KL divergence loss* Property prediction loss* Graph loss (gradient penalty)The categorical crossentropy loss function measures the model'sreconstruction accuracy. The Property prediction loss estimates the mean squarederror between predicted and actual properties after running the latent representationthrough a property prediction model. The propertyprediction of the model is optimized via binary crossentropy. The gradientpenalty is further guided by the model's property (QED) prediction.A gradient penalty is an alternative soft constraint on the1-Lipschitz continuity as an improvement upon the gradient clipping scheme from theoriginal neural network("1-Lipschitz continuity" means that the norm of the gradient is at most 1 at evey singlepoint of the function).It adds a regularization term to the loss function.<jupyter_code>class MoleculeGenerator(keras.Model):
def __init__(self, encoder, decoder, max_len, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
self.property_prediction_layer = layers.Dense(1)
self.max_len = max_len
self.train_total_loss_tracker = keras.metrics.Mean(name="train_total_loss")
self.val_total_loss_tracker = keras.metrics.Mean(name="val_total_loss")
def train_step(self, data):
adjacency_tensor, feature_tensor, qed_tensor = data[0]
graph_real = [adjacency_tensor, feature_tensor]
self.batch_size = tf.shape(qed_tensor)[0]
with tf.GradientTape() as tape:
z_mean, z_log_var, qed_pred, gen_adjacency, gen_features = self(
graph_real, training=True
)
graph_generated = [gen_adjacency, gen_features]
total_loss = self._compute_loss(
z_log_var, z_mean, qed_tensor, qed_pred, graph_real, graph_generated
)
grads = tape.gradient(total_loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
self.train_total_loss_tracker.update_state(total_loss)
return {"loss": self.train_total_loss_tracker.result()}
def _compute_loss(
self, z_log_var, z_mean, qed_true, qed_pred, graph_real, graph_generated
):
adjacency_real, features_real = graph_real
adjacency_gen, features_gen = graph_generated
adjacency_loss = tf.reduce_mean(
tf.reduce_sum(
keras.losses.categorical_crossentropy(adjacency_real, adjacency_gen),
axis=(1, 2),
)
)
features_loss = tf.reduce_mean(
tf.reduce_sum(
keras.losses.categorical_crossentropy(features_real, features_gen),
axis=(1),
)
)
kl_loss = -0.5 * tf.reduce_sum(
1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var), 1
)
kl_loss = tf.reduce_mean(kl_loss)
property_loss = tf.reduce_mean(
keras.losses.binary_crossentropy(qed_true, qed_pred)
)
graph_loss = self._gradient_penalty(graph_real, graph_generated)
return kl_loss + property_loss + graph_loss + adjacency_loss + features_loss
def _gradient_penalty(self, graph_real, graph_generated):
# Unpack graphs
adjacency_real, features_real = graph_real
adjacency_generated, features_generated = graph_generated
# Generate interpolated graphs (adjacency_interp and features_interp)
alpha = tf.random.uniform([self.batch_size])
alpha = tf.reshape(alpha, (self.batch_size, 1, 1, 1))
adjacency_interp = (adjacency_real * alpha) + (1 - alpha) * adjacency_generated
alpha = tf.reshape(alpha, (self.batch_size, 1, 1))
features_interp = (features_real * alpha) + (1 - alpha) * features_generated
# Compute the logits of interpolated graphs
with tf.GradientTape() as tape:
tape.watch(adjacency_interp)
tape.watch(features_interp)
_, _, logits, _, _ = self(
[adjacency_interp, features_interp], training=True
)
# Compute the gradients with respect to the interpolated graphs
grads = tape.gradient(logits, [adjacency_interp, features_interp])
# Compute the gradient penalty
grads_adjacency_penalty = (1 - tf.norm(grads[0], axis=1)) ** 2
grads_features_penalty = (1 - tf.norm(grads[1], axis=2)) ** 2
return tf.reduce_mean(
tf.reduce_mean(grads_adjacency_penalty, axis=(-2, -1))
+ tf.reduce_mean(grads_features_penalty, axis=(-1))
)
def inference(self, batch_size):
z = tf.random.normal((batch_size, LATENT_DIM))
reconstruction_adjacency, reconstruction_features = model.decoder.predict(z)
# obtain one-hot encoded adjacency tensor
adjacency = tf.argmax(reconstruction_adjacency, axis=1)
adjacency = tf.one_hot(adjacency, depth=BOND_DIM, axis=1)
# Remove potential self-loops from adjacency
adjacency = tf.linalg.set_diag(adjacency, tf.zeros(tf.shape(adjacency)[:-1]))
# obtain one-hot encoded feature tensor
features = tf.argmax(reconstruction_features, axis=2)
features = tf.one_hot(features, depth=ATOM_DIM, axis=2)
return [
graph_to_molecule([adjacency[i].numpy(), features[i].numpy()])
for i in range(batch_size)
]
def call(self, inputs):
z_mean, log_var = self.encoder(inputs)
z = Sampling()([z_mean, log_var])
gen_adjacency, gen_features = self.decoder(z)
property_pred = self.property_prediction_layer(z_mean)
return z_mean, log_var, property_pred, gen_adjacency, gen_features<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>vae_optimizer = tf.keras.optimizers.Adam(learning_rate=VAE_LR)
encoder = get_encoder(
gconv_units=[9],
adjacency_shape=(BOND_DIM, NUM_ATOMS, NUM_ATOMS),
feature_shape=(NUM_ATOMS, ATOM_DIM),
latent_dim=LATENT_DIM,
dense_units=[512],
dropout_rate=0.0,
)
decoder = get_decoder(
dense_units=[128, 256, 512],
dropout_rate=0.2,
latent_dim=LATENT_DIM,
adjacency_shape=(BOND_DIM, NUM_ATOMS, NUM_ATOMS),
feature_shape=(NUM_ATOMS, ATOM_DIM),
)
model = MoleculeGenerator(encoder, decoder, MAX_MOLSIZE)
model.compile(vae_optimizer)
history = model.fit([adjacency_tensor, feature_tensor, qed_tensor], epochs=EPOCHS)<jupyter_output><empty_output><jupyter_text>InferenceWe use our model to generate new valid molecules from different points of the latent space. Generate unique Molecules with the model<jupyter_code>molecules = model.inference(1000)
MolsToGridImage(
[m for m in molecules if m is not None][:1000], molsPerRow=5, subImgSize=(260, 160)
)<jupyter_output><empty_output><jupyter_text>Display latent space clusters with respect to molecular properties (QED)<jupyter_code>def plot_latent(vae, data, labels):
# display a 2D plot of the property in the latent space
z_mean, _ = vae.encoder.predict(data)
plt.figure(figsize=(12, 10))
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=labels)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.show()
plot_latent(model, [adjacency_tensor[:8000], feature_tensor[:8000]], qed_tensor[:8000])<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/molecule_generation.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/molecule_generation.ipynb",
"repo_id": "keras-io",
"token_count": 8506
} | 95 |
# CycleGAN
**Author:** [A_K_Nain](https://twitter.com/A_K_Nain)<br>
**Date created:** 2020/08/12<br>
**Last modified:** 2020/08/12<br>
**Description:** Implementation of CycleGAN.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/cyclegan.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/cyclegan.py)
---
## CycleGAN
CycleGAN is a model that aims to solve the image-to-image translation
problem. The goal of the image-to-image translation problem is to learn the
mapping between an input image and an output image using a training set of
aligned image pairs. However, obtaining paired examples isn't always feasible.
CycleGAN tries to learn this mapping without requiring paired input-output images,
using cycle-consistent adversarial networks.
- [Paper](https://arxiv.org/pdf/1703.10593.pdf)
- [Original implementation](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix)
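The core idea is cycle consistency: translating an image to the other domain and back should recover the original image. As a minimal, illustrative sketch (not the exact training code used below), the cycle-consistency term is an L1 penalty between a real image and its reconstruction:
```python
import tensorflow as tf


def cycle_consistency_loss(real_x, cycled_x, lambda_cycle=10.0):
    # Mean absolute error between the original image and the image obtained
    # by mapping it to the other domain and back: x -> G(x) -> F(G(x)).
    return lambda_cycle * tf.reduce_mean(tf.abs(real_x - cycled_x))
```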
---
## Setup
```python
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
autotune = tf.data.AUTOTUNE
```
---
## Prepare the dataset
In this example, we will be using the
[horse to zebra](https://www.tensorflow.org/datasets/catalog/cycle_gan#cycle_ganhorse2zebra)
dataset.
```python
# Load the horse-zebra dataset using tensorflow-datasets.
dataset, _ = tfds.load("cycle_gan/horse2zebra", with_info=True, as_supervised=True)
train_horses, train_zebras = dataset["trainA"], dataset["trainB"]
test_horses, test_zebras = dataset["testA"], dataset["testB"]
# Define the standard image size.
orig_img_size = (286, 286)
# Size of the random crops to be used during training.
input_img_size = (256, 256, 3)
# Weights initializer for the layers.
kernel_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
# Gamma initializer for instance normalization.
gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
buffer_size = 256
batch_size = 1
def normalize_img(img):
img = tf.cast(img, dtype=tf.float32)
# Map values in the range [-1, 1]
return (img / 127.5) - 1.0
def preprocess_train_image(img, label):
# Random flip
img = tf.image.random_flip_left_right(img)
# Resize to the original size first
img = tf.image.resize(img, [*orig_img_size])
# Random crop to 256X256
img = tf.image.random_crop(img, size=[*input_img_size])
# Normalize the pixel values in the range [-1, 1]
img = normalize_img(img)
return img
def preprocess_test_image(img, label):
# Only resizing and normalization for the test images.
img = tf.image.resize(img, [input_img_size[0], input_img_size[1]])
img = normalize_img(img)
return img
```
---
## Create `Dataset` objects
```python
# Apply the preprocessing operations to the training data
train_horses = (
train_horses.map(preprocess_train_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
train_zebras = (
train_zebras.map(preprocess_train_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
# Apply the preprocessing operations to the test data
test_horses = (
test_horses.map(preprocess_test_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
test_zebras = (
test_zebras.map(preprocess_test_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
```
---
## Visualize some samples
```python
_, ax = plt.subplots(4, 2, figsize=(10, 15))
for i, samples in enumerate(zip(train_horses.take(4), train_zebras.take(4))):
horse = (((samples[0][0] * 127.5) + 127.5).numpy()).astype(np.uint8)
zebra = (((samples[1][0] * 127.5) + 127.5).numpy()).astype(np.uint8)
ax[i, 0].imshow(horse)
ax[i, 1].imshow(zebra)
plt.show()
```

---
## Building blocks used in the CycleGAN generators and discriminators
```python
class ReflectionPadding2D(layers.Layer):
"""Implements Reflection Padding as a layer.
Args:
padding(tuple): Amount of padding for the
spatial dimensions.
Returns:
A padded tensor with the same type as the input tensor.
"""
def __init__(self, padding=(1, 1), **kwargs):
self.padding = tuple(padding)
super().__init__(**kwargs)
def call(self, input_tensor, mask=None):
padding_width, padding_height = self.padding
padding_tensor = [
[0, 0],
[padding_height, padding_height],
[padding_width, padding_width],
[0, 0],
]
return tf.pad(input_tensor, padding_tensor, mode="REFLECT")
def residual_block(
x,
activation,
kernel_initializer=kernel_init,
kernel_size=(3, 3),
strides=(1, 1),
padding="valid",
gamma_initializer=gamma_init,
use_bias=False,
):
dim = x.shape[-1]
input_tensor = x
x = ReflectionPadding2D()(input_tensor)
x = layers.Conv2D(
dim,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
x = activation(x)
x = ReflectionPadding2D()(x)
x = layers.Conv2D(
dim,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
x = layers.add([input_tensor, x])
return x
def downsample(
x,
filters,
activation,
kernel_initializer=kernel_init,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
gamma_initializer=gamma_init,
use_bias=False,
):
x = layers.Conv2D(
filters,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
if activation:
x = activation(x)
return x
def upsample(
x,
filters,
activation,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
kernel_initializer=kernel_init,
gamma_initializer=gamma_init,
use_bias=False,
):
x = layers.Conv2DTranspose(
filters,
kernel_size,
strides=strides,
padding=padding,
kernel_initializer=kernel_initializer,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
if activation:
x = activation(x)
return x
```
---
## Build the generators
The generator consists of downsampling blocks, nine residual blocks,
and upsampling blocks. The structure of the generator is the following:
```
c7s1-64 ==> Conv block with `relu` activation, filter size of 7
d128 ====|
|-> 2 downsampling blocks
d256 ====|
R256 ====|
R256 |
R256 |
R256 |
R256 |-> 9 residual blocks
R256 |
R256 |
R256 |
R256 ====|
u128 ====|
|-> 2 upsampling blocks
u64 ====|
c7s1-3 => Last conv block with `tanh` activation, filter size of 7.
```
```python
def get_resnet_generator(
filters=64,
num_downsampling_blocks=2,
num_residual_blocks=9,
num_upsample_blocks=2,
gamma_initializer=gamma_init,
name=None,
):
img_input = layers.Input(shape=input_img_size, name=name + "_img_input")
x = ReflectionPadding2D(padding=(3, 3))(img_input)
x = layers.Conv2D(filters, (7, 7), kernel_initializer=kernel_init, use_bias=False)(
x
)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
x = layers.Activation("relu")(x)
# Downsampling
for _ in range(num_downsampling_blocks):
filters *= 2
x = downsample(x, filters=filters, activation=layers.Activation("relu"))
# Residual blocks
for _ in range(num_residual_blocks):
x = residual_block(x, activation=layers.Activation("relu"))
# Upsampling
for _ in range(num_upsample_blocks):
filters //= 2
x = upsample(x, filters, activation=layers.Activation("relu"))
# Final block
x = ReflectionPadding2D(padding=(3, 3))(x)
x = layers.Conv2D(3, (7, 7), padding="valid")(x)
x = layers.Activation("tanh")(x)
model = keras.models.Model(img_input, x, name=name)
return model
```
---
## Build the discriminators
The discriminators implement the following architecture:
`C64->C128->C256->C512`
```python
def get_discriminator(
filters=64, kernel_initializer=kernel_init, num_downsampling=3, name=None
):
img_input = layers.Input(shape=input_img_size, name=name + "_img_input")
x = layers.Conv2D(
filters,
(4, 4),
strides=(2, 2),
padding="same",
kernel_initializer=kernel_initializer,
)(img_input)
x = layers.LeakyReLU(0.2)(x)
num_filters = filters
for num_downsample_block in range(3):
num_filters *= 2
if num_downsample_block < 2:
x = downsample(
x,
filters=num_filters,
activation=layers.LeakyReLU(0.2),
kernel_size=(4, 4),
strides=(2, 2),
)
else:
x = downsample(
x,
filters=num_filters,
activation=layers.LeakyReLU(0.2),
kernel_size=(4, 4),
strides=(1, 1),
)
x = layers.Conv2D(
1, (4, 4), strides=(1, 1), padding="same", kernel_initializer=kernel_initializer
)(x)
model = keras.models.Model(inputs=img_input, outputs=x, name=name)
return model
# Get the generators
gen_G = get_resnet_generator(name="generator_G")
gen_F = get_resnet_generator(name="generator_F")
# Get the discriminators
disc_X = get_discriminator(name="discriminator_X")
disc_Y = get_discriminator(name="discriminator_Y")
```
---
## Build the CycleGAN model
We will override the `train_step()` method of the `Model` class
for training via `fit()`.
```python
class CycleGan(keras.Model):
def __init__(
self,
generator_G,
generator_F,
discriminator_X,
discriminator_Y,
lambda_cycle=10.0,
lambda_identity=0.5,
):
super().__init__()
self.gen_G = generator_G
self.gen_F = generator_F
self.disc_X = discriminator_X
self.disc_Y = discriminator_Y
self.lambda_cycle = lambda_cycle
self.lambda_identity = lambda_identity
def compile(
self,
gen_G_optimizer,
gen_F_optimizer,
disc_X_optimizer,
disc_Y_optimizer,
gen_loss_fn,
disc_loss_fn,
):
super().compile()
self.gen_G_optimizer = gen_G_optimizer
self.gen_F_optimizer = gen_F_optimizer
self.disc_X_optimizer = disc_X_optimizer
self.disc_Y_optimizer = disc_Y_optimizer
self.generator_loss_fn = gen_loss_fn
self.discriminator_loss_fn = disc_loss_fn
self.cycle_loss_fn = keras.losses.MeanAbsoluteError()
self.identity_loss_fn = keras.losses.MeanAbsoluteError()
def train_step(self, batch_data):
# x is Horse and y is zebra
real_x, real_y = batch_data
# For CycleGAN, we need to calculate different
# kinds of losses for the generators and discriminators.
# We will perform the following steps here:
#
# 1. Pass real images through the generators and get the generated images
# 2. Pass the generated images back to the generators to check if we
# can predict the original image from the generated image.
# 3. Do an identity mapping of the real images using the generators.
# 4. Pass the generated images in 1) to the corresponding discriminators.
# 5. Calculate the generators total loss (adversarial + cycle + identity)
# 6. Calculate the discriminators loss
# 7. Update the weights of the generators
# 8. Update the weights of the discriminators
# 9. Return the losses in a dictionary
with tf.GradientTape(persistent=True) as tape:
# Horse to fake zebra
fake_y = self.gen_G(real_x, training=True)
# Zebra to fake horse -> y2x
fake_x = self.gen_F(real_y, training=True)
# Cycle (Horse to fake zebra to fake horse): x -> y -> x
cycled_x = self.gen_F(fake_y, training=True)
# Cycle (Zebra to fake horse to fake zebra) y -> x -> y
cycled_y = self.gen_G(fake_x, training=True)
# Identity mapping
same_x = self.gen_F(real_x, training=True)
same_y = self.gen_G(real_y, training=True)
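            # Note (added for clarity): the identity mappings computed above feed
            # the identity loss further down; the CycleGAN paper reports that this
            # term encourages each generator to act as an identity map on images
            # already from its output domain, helping preserve colour composition.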
# Discriminator output
disc_real_x = self.disc_X(real_x, training=True)
disc_fake_x = self.disc_X(fake_x, training=True)
disc_real_y = self.disc_Y(real_y, training=True)
disc_fake_y = self.disc_Y(fake_y, training=True)
# Generator adversarial loss
gen_G_loss = self.generator_loss_fn(disc_fake_y)
gen_F_loss = self.generator_loss_fn(disc_fake_x)
# Generator cycle loss
cycle_loss_G = self.cycle_loss_fn(real_y, cycled_y) * self.lambda_cycle
cycle_loss_F = self.cycle_loss_fn(real_x, cycled_x) * self.lambda_cycle
# Generator identity loss
id_loss_G = (
self.identity_loss_fn(real_y, same_y)
* self.lambda_cycle
* self.lambda_identity
)
id_loss_F = (
self.identity_loss_fn(real_x, same_x)
* self.lambda_cycle
* self.lambda_identity
)
# Total generator loss
total_loss_G = gen_G_loss + cycle_loss_G + id_loss_G
total_loss_F = gen_F_loss + cycle_loss_F + id_loss_F
# Discriminator loss
disc_X_loss = self.discriminator_loss_fn(disc_real_x, disc_fake_x)
disc_Y_loss = self.discriminator_loss_fn(disc_real_y, disc_fake_y)
# Get the gradients for the generators
grads_G = tape.gradient(total_loss_G, self.gen_G.trainable_variables)
grads_F = tape.gradient(total_loss_F, self.gen_F.trainable_variables)
# Get the gradients for the discriminators
disc_X_grads = tape.gradient(disc_X_loss, self.disc_X.trainable_variables)
disc_Y_grads = tape.gradient(disc_Y_loss, self.disc_Y.trainable_variables)
# Update the weights of the generators
self.gen_G_optimizer.apply_gradients(
zip(grads_G, self.gen_G.trainable_variables)
)
self.gen_F_optimizer.apply_gradients(
zip(grads_F, self.gen_F.trainable_variables)
)
# Update the weights of the discriminators
self.disc_X_optimizer.apply_gradients(
zip(disc_X_grads, self.disc_X.trainable_variables)
)
self.disc_Y_optimizer.apply_gradients(
zip(disc_Y_grads, self.disc_Y.trainable_variables)
)
return {
"G_loss": total_loss_G,
"F_loss": total_loss_F,
"D_X_loss": disc_X_loss,
"D_Y_loss": disc_Y_loss,
}
```
---
## Create a callback that periodically saves generated images
```python
class GANMonitor(keras.callbacks.Callback):
"""A callback to generate and save images after each epoch"""
def __init__(self, num_img=4):
self.num_img = num_img
def on_epoch_end(self, epoch, logs=None):
_, ax = plt.subplots(4, 2, figsize=(12, 12))
for i, img in enumerate(test_horses.take(self.num_img)):
prediction = self.model.gen_G(img)[0].numpy()
prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8)
ax[i, 0].imshow(img)
ax[i, 1].imshow(prediction)
ax[i, 0].set_title("Input image")
ax[i, 1].set_title("Translated image")
ax[i, 0].axis("off")
ax[i, 1].axis("off")
prediction = keras.utils.array_to_img(prediction)
prediction.save(
"generated_img_{i}_{epoch}.png".format(i=i, epoch=epoch + 1)
)
plt.show()
plt.close()
```
---
## Train the end-to-end model
```python
# Loss function for evaluating adversarial loss
adv_loss_fn = keras.losses.MeanSquaredError()
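# (Mean squared error here corresponds to the least-squares GAN objective that
# CycleGAN uses in place of the usual binary cross-entropy adversarial loss.)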
# Define the loss function for the generators
def generator_loss_fn(fake):
fake_loss = adv_loss_fn(tf.ones_like(fake), fake)
return fake_loss
# Define the loss function for the discriminators
def discriminator_loss_fn(real, fake):
real_loss = adv_loss_fn(tf.ones_like(real), real)
fake_loss = adv_loss_fn(tf.zeros_like(fake), fake)
return (real_loss + fake_loss) * 0.5
# Create cycle gan model
cycle_gan_model = CycleGan(
generator_G=gen_G, generator_F=gen_F, discriminator_X=disc_X, discriminator_Y=disc_Y
)
# Compile the model
cycle_gan_model.compile(
gen_G_optimizer=keras.optimizers.legacy.Adam(learning_rate=2e-4, beta_1=0.5),
gen_F_optimizer=keras.optimizers.legacy.Adam(learning_rate=2e-4, beta_1=0.5),
disc_X_optimizer=keras.optimizers.legacy.Adam(learning_rate=2e-4, beta_1=0.5),
disc_Y_optimizer=keras.optimizers.legacy.Adam(learning_rate=2e-4, beta_1=0.5),
gen_loss_fn=generator_loss_fn,
disc_loss_fn=discriminator_loss_fn,
)
# Callbacks
plotter = GANMonitor()
checkpoint_filepath = "./model_checkpoints/cyclegan_checkpoints.{epoch:03d}"
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath, save_weights_only=True
)
# Here we will train the model for just one epoch as each epoch takes around
# 7 minutes on a single P100 backed machine.
cycle_gan_model.fit(
tf.data.Dataset.zip((train_horses, train_zebras)),
epochs=1,
callbacks=[plotter, model_checkpoint_callback],
)
```
<div class="k-default-codeblock">
```
1067/1067 [==============================] - ETA: 0s - G_loss: 4.4794 - F_loss: 4.1048 - D_X_loss: 0.1584 - D_Y_loss: 0.1233
```
</div>

<div class="k-default-codeblock">
```
1067/1067 [==============================] - 390s 366ms/step - G_loss: 4.4783 - F_loss: 4.1035 - D_X_loss: 0.1584 - D_Y_loss: 0.1232
<tensorflow.python.keras.callbacks.History at 0x7f4184326e90>
```
</div>
Test the performance of the model.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/CycleGAN) and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/CycleGAN).
```python
# This model was trained for 90 epochs. We will be loading those weights
# here. Once the weights are loaded, we will take a few samples from the test
# data and check the model's performance.
```
```python
!curl -LO https://github.com/AakashKumarNain/CycleGAN_TF2/releases/download/v1.0/saved_checkpoints.zip
!unzip -qq saved_checkpoints.zip
```
```python
# Load the checkpoints
weight_file = "./saved_checkpoints/cyclegan_checkpoints.090"
cycle_gan_model.load_weights(weight_file).expect_partial()
print("Weights loaded successfully")
_, ax = plt.subplots(4, 2, figsize=(10, 15))
for i, img in enumerate(test_horses.take(4)):
prediction = cycle_gan_model.gen_G(img, training=False)[0].numpy()
prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8)
ax[i, 0].imshow(img)
ax[i, 1].imshow(prediction)
ax[i, 0].set_title("Input image")
ax[i, 1].set_title("Translated image")
ax[i, 0].axis("off")
ax[i, 1].axis("off")
prediction = keras.utils.array_to_img(prediction)
prediction.save("predicted_img_{i}.png".format(i=i))
plt.tight_layout()
plt.show()
```
<div class="k-default-codeblock">
```
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 634 100 634 0 0 2874 0 --:--:-- --:--:-- --:--:-- 2881
100 273M 100 273M 0 0 1736k 0 0:02:41 0:02:41 --:--:-- 2049k
Weights loaded successfully
```
</div>

| keras-io/examples/generative/md/cyclegan.md/0 | {
"file_path": "keras-io/examples/generative/md/cyclegan.md",
"repo_id": "keras-io",
"token_count": 9184
} | 96 |
# Density estimation using Real NVP
**Authors:** [Mandolini Giorgio Maria](https://www.linkedin.com/in/giorgio-maria-mandolini-a2a1b71b4/), [Sanna Daniele](https://www.linkedin.com/in/daniele-sanna-338629bb/), [Zannini Quirini Giorgio](https://www.linkedin.com/in/giorgio-zannini-quirini-16ab181a0/)<br>
**Date created:** 2020/08/10<br>
**Last modified:** 2020/08/10<br>
**Description:** Estimating the density distribution of the "double moon" dataset.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/real_nvp.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/real_nvp.py)
---
## Introduction
The aim of this work is to map a simple distribution - which is easy to sample
and whose density is simple to estimate - to a more complex one learned from the data.
This kind of generative model is also known as "normalizing flow".
In order to do this, the model is trained via the maximum
likelihood principle, using the "change of variables" formula.
We will use an affine coupling function. We create it such that its inverse, as well as
the determinant of the Jacobian, are easy to obtain (more details in the referenced paper).
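Concretely, if `z = f(x)` maps a data point to the latent space, the change of variables
formula gives `log p_X(x) = log p_Z(f(x)) + log |det J_f(x)|`, and for an affine coupling
step the log-determinant reduces to a sum of the predicted log-scales (with a sign given
by the direction of the pass). The snippet below is a minimal numerical sketch of that
objective for a single 2D point; the `s` and `t` values are made-up stand-ins for the
coupling network outputs, and it is independent of the model built later in this example.
```python
import numpy as np

# One 2D data point; the first coordinate is left untouched (the "masked" part),
# the second is shifted and scaled by the (hypothetical) coupling outputs below.
x = np.array([0.3, -1.2])
s, t = 0.5, 0.1  # made-up log-scale and translation predicted from x[0]

# Map data -> latent: z1 = x1, z2 = (x2 - t) * exp(-s)
z = np.array([x[0], (x[1] - t) * np.exp(-s)])

# Standard 2D Gaussian log-density of z, plus the log-determinant of the map
# above, which is simply -s for this affine coupling step.
log_pz = -0.5 * np.sum(z**2) - np.log(2.0 * np.pi)
log_px = log_pz + (-s)
print(log_px)  # maximizing this (averaged over the data) is the training objective
```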
**Requirements:**
* Tensorflow 2.9.1
* Tensorflow probability 0.17.0
**Reference:**
[Density estimation using Real NVP](https://arxiv.org/abs/1605.08803)
---
## Setup
```python
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from sklearn.datasets import make_moons
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_probability as tfp
```
---
## Load the data
```python
data = make_moons(3000, noise=0.05)[0].astype("float32")
norm = layers.Normalization()
norm.adapt(data)
normalized_data = norm(data)
```
---
## Affine coupling layer
```python
# Creating a custom layer with keras API.
output_dim = 256
reg = 0.01
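# Each Coupling block builds two small MLPs from the same input: `t` predicts a
# translation (linear output) and `s` predicts a log-scale (tanh output, which
# keeps the scale factors bounded).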
def Coupling(input_shape):
input = keras.layers.Input(shape=input_shape)
t_layer_1 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(input)
t_layer_2 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_1)
t_layer_3 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_2)
t_layer_4 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_3)
t_layer_5 = keras.layers.Dense(
input_shape, activation="linear", kernel_regularizer=regularizers.l2(reg)
)(t_layer_4)
s_layer_1 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(input)
s_layer_2 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_1)
s_layer_3 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_2)
s_layer_4 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_3)
s_layer_5 = keras.layers.Dense(
input_shape, activation="tanh", kernel_regularizer=regularizers.l2(reg)
)(s_layer_4)
return keras.Model(inputs=input, outputs=[s_layer_5, t_layer_5])
```
---
## Real NVP
```python
class RealNVP(keras.Model):
def __init__(self, num_coupling_layers):
super().__init__()
self.num_coupling_layers = num_coupling_layers
# Distribution of the latent space.
self.distribution = tfp.distributions.MultivariateNormalDiag(
loc=[0.0, 0.0], scale_diag=[1.0, 1.0]
)
self.masks = np.array(
[[0, 1], [1, 0]] * (num_coupling_layers // 2), dtype="float32"
)
self.loss_tracker = keras.metrics.Mean(name="loss")
self.layers_list = [Coupling(2) for i in range(num_coupling_layers)]
@property
def metrics(self):
"""List of the model's metrics.
We make sure the loss tracker is listed as part of `model.metrics`
so that `fit()` and `evaluate()` are able to reset the loss tracker (via `reset_state()`)
at the start of each epoch and at the start of an `evaluate()` call.
"""
return [self.loss_tracker]
def call(self, x, training=True):
log_det_inv = 0
direction = 1
if training:
direction = -1
for i in range(self.num_coupling_layers)[::direction]:
x_masked = x * self.masks[i]
reversed_mask = 1 - self.masks[i]
s, t = self.layers_list[i](x_masked)
s *= reversed_mask
t *= reversed_mask
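            # direction = -1 runs the data -> latent pass used during training,
            # while direction = 1 runs the exact inverse (latent -> data) used
            # for sampling. `gate` below is -1 in the first case and 0 in the
            # second: it turns the update into the matching affine transform
            # and keeps the log-determinant term only for the training pass.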
gate = (direction - 1) / 2
x = (
reversed_mask
* (x * tf.exp(direction * s) + direction * t * tf.exp(gate * s))
+ x_masked
)
log_det_inv += gate * tf.reduce_sum(s, [1])
return x, log_det_inv
# Log likelihood of the normal distribution plus the log determinant of the jacobian.
def log_loss(self, x):
y, logdet = self(x)
log_likelihood = self.distribution.log_prob(y) + logdet
return -tf.reduce_mean(log_likelihood)
def train_step(self, data):
with tf.GradientTape() as tape:
loss = self.log_loss(data)
g = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(g, self.trainable_variables))
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
def test_step(self, data):
loss = self.log_loss(data)
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
```
---
## Model training
```python
model = RealNVP(num_coupling_layers=6)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001))
history = model.fit(
normalized_data, batch_size=256, epochs=300, verbose=2, validation_split=0.2
)
```
<div class="k-default-codeblock">
```
Epoch 1/300
10/10 - 2s - loss: 2.7104 - val_loss: 2.6385 - 2s/epoch - 248ms/step
Epoch 2/300
10/10 - 0s - loss: 2.5951 - val_loss: 2.5818 - 162ms/epoch - 16ms/step
Epoch 3/300
10/10 - 0s - loss: 2.5487 - val_loss: 2.5299 - 165ms/epoch - 17ms/step
Epoch 4/300
10/10 - 0s - loss: 2.5081 - val_loss: 2.4989 - 150ms/epoch - 15ms/step
Epoch 5/300
10/10 - 0s - loss: 2.4729 - val_loss: 2.4641 - 168ms/epoch - 17ms/step
Epoch 6/300
10/10 - 0s - loss: 2.4457 - val_loss: 2.4443 - 155ms/epoch - 16ms/step
Epoch 7/300
10/10 - 0s - loss: 2.4183 - val_loss: 2.4078 - 155ms/epoch - 16ms/step
Epoch 8/300
10/10 - 0s - loss: 2.3840 - val_loss: 2.3852 - 160ms/epoch - 16ms/step
Epoch 9/300
10/10 - 0s - loss: 2.3604 - val_loss: 2.3700 - 172ms/epoch - 17ms/step
Epoch 10/300
10/10 - 0s - loss: 2.3392 - val_loss: 2.3354 - 156ms/epoch - 16ms/step
Epoch 11/300
10/10 - 0s - loss: 2.3042 - val_loss: 2.3099 - 170ms/epoch - 17ms/step
Epoch 12/300
10/10 - 0s - loss: 2.2769 - val_loss: 2.3126 - 171ms/epoch - 17ms/step
Epoch 13/300
10/10 - 0s - loss: 2.2541 - val_loss: 2.2784 - 174ms/epoch - 17ms/step
Epoch 14/300
10/10 - 0s - loss: 2.2175 - val_loss: 2.2354 - 174ms/epoch - 17ms/step
Epoch 15/300
10/10 - 0s - loss: 2.1957 - val_loss: 2.1990 - 173ms/epoch - 17ms/step
Epoch 16/300
10/10 - 0s - loss: 2.1533 - val_loss: 2.1686 - 167ms/epoch - 17ms/step
Epoch 17/300
10/10 - 0s - loss: 2.1232 - val_loss: 2.1276 - 178ms/epoch - 18ms/step
Epoch 18/300
10/10 - 0s - loss: 2.0932 - val_loss: 2.1220 - 173ms/epoch - 17ms/step
Epoch 19/300
10/10 - 0s - loss: 2.1068 - val_loss: 2.1515 - 152ms/epoch - 15ms/step
Epoch 20/300
10/10 - 0s - loss: 2.0793 - val_loss: 2.1661 - 161ms/epoch - 16ms/step
Epoch 21/300
10/10 - 0s - loss: 2.0784 - val_loss: 2.0634 - 180ms/epoch - 18ms/step
Epoch 22/300
10/10 - 0s - loss: 2.0060 - val_loss: 2.0076 - 160ms/epoch - 16ms/step
Epoch 23/300
10/10 - 0s - loss: 1.9845 - val_loss: 1.9773 - 174ms/epoch - 17ms/step
Epoch 24/300
10/10 - 0s - loss: 1.9462 - val_loss: 2.0097 - 174ms/epoch - 17ms/step
Epoch 25/300
10/10 - 0s - loss: 1.8892 - val_loss: 1.9023 - 173ms/epoch - 17ms/step
Epoch 26/300
10/10 - 0s - loss: 1.8011 - val_loss: 1.8128 - 182ms/epoch - 18ms/step
Epoch 27/300
10/10 - 0s - loss: 1.7604 - val_loss: 1.8415 - 167ms/epoch - 17ms/step
Epoch 28/300
10/10 - 0s - loss: 1.7474 - val_loss: 1.7635 - 172ms/epoch - 17ms/step
Epoch 29/300
10/10 - 0s - loss: 1.7313 - val_loss: 1.7154 - 175ms/epoch - 18ms/step
Epoch 30/300
10/10 - 0s - loss: 1.6801 - val_loss: 1.7269 - 183ms/epoch - 18ms/step
Epoch 31/300
10/10 - 0s - loss: 1.6892 - val_loss: 1.6588 - 170ms/epoch - 17ms/step
Epoch 32/300
10/10 - 0s - loss: 1.6384 - val_loss: 1.6467 - 159ms/epoch - 16ms/step
Epoch 33/300
10/10 - 0s - loss: 1.6263 - val_loss: 1.6214 - 164ms/epoch - 16ms/step
Epoch 34/300
10/10 - 0s - loss: 1.6035 - val_loss: 1.6022 - 154ms/epoch - 15ms/step
Epoch 35/300
10/10 - 0s - loss: 1.5872 - val_loss: 1.6203 - 159ms/epoch - 16ms/step
Epoch 36/300
10/10 - 0s - loss: 1.5928 - val_loss: 1.6312 - 159ms/epoch - 16ms/step
Epoch 37/300
10/10 - 0s - loss: 1.5895 - val_loss: 1.6337 - 148ms/epoch - 15ms/step
Epoch 38/300
10/10 - 0s - loss: 1.5726 - val_loss: 1.6192 - 153ms/epoch - 15ms/step
Epoch 39/300
10/10 - 0s - loss: 1.5537 - val_loss: 1.5919 - 168ms/epoch - 17ms/step
Epoch 40/300
10/10 - 0s - loss: 1.5741 - val_loss: 1.6646 - 173ms/epoch - 17ms/step
Epoch 41/300
10/10 - 0s - loss: 1.5737 - val_loss: 1.5718 - 181ms/epoch - 18ms/step
Epoch 42/300
10/10 - 0s - loss: 1.5573 - val_loss: 1.6395 - 173ms/epoch - 17ms/step
Epoch 43/300
10/10 - 0s - loss: 1.5574 - val_loss: 1.5779 - 183ms/epoch - 18ms/step
Epoch 44/300
10/10 - 0s - loss: 1.5345 - val_loss: 1.5549 - 173ms/epoch - 17ms/step
Epoch 45/300
10/10 - 0s - loss: 1.5256 - val_loss: 1.5944 - 161ms/epoch - 16ms/step
Epoch 46/300
10/10 - 0s - loss: 1.5291 - val_loss: 1.5325 - 169ms/epoch - 17ms/step
Epoch 47/300
10/10 - 0s - loss: 1.5341 - val_loss: 1.5929 - 177ms/epoch - 18ms/step
Epoch 48/300
10/10 - 0s - loss: 1.5190 - val_loss: 1.5563 - 174ms/epoch - 17ms/step
Epoch 49/300
10/10 - 0s - loss: 1.5059 - val_loss: 1.5079 - 187ms/epoch - 19ms/step
Epoch 50/300
10/10 - 0s - loss: 1.4971 - val_loss: 1.5163 - 177ms/epoch - 18ms/step
Epoch 51/300
10/10 - 0s - loss: 1.4923 - val_loss: 1.5549 - 168ms/epoch - 17ms/step
Epoch 52/300
10/10 - 0s - loss: 1.5345 - val_loss: 1.7131 - 171ms/epoch - 17ms/step
Epoch 53/300
10/10 - 0s - loss: 1.5381 - val_loss: 1.5102 - 174ms/epoch - 17ms/step
Epoch 54/300
10/10 - 0s - loss: 1.4955 - val_loss: 1.5432 - 167ms/epoch - 17ms/step
Epoch 55/300
10/10 - 0s - loss: 1.4829 - val_loss: 1.5166 - 172ms/epoch - 17ms/step
Epoch 56/300
10/10 - 0s - loss: 1.4672 - val_loss: 1.5297 - 180ms/epoch - 18ms/step
Epoch 57/300
10/10 - 0s - loss: 1.4814 - val_loss: 1.5115 - 166ms/epoch - 17ms/step
Epoch 58/300
10/10 - 0s - loss: 1.4738 - val_loss: 1.5143 - 165ms/epoch - 17ms/step
Epoch 59/300
10/10 - 0s - loss: 1.4639 - val_loss: 1.5326 - 175ms/epoch - 17ms/step
Epoch 60/300
10/10 - 0s - loss: 1.4727 - val_loss: 1.5419 - 175ms/epoch - 18ms/step
Epoch 61/300
10/10 - 0s - loss: 1.4610 - val_loss: 1.4663 - 177ms/epoch - 18ms/step
Epoch 62/300
10/10 - 0s - loss: 1.4512 - val_loss: 1.5624 - 179ms/epoch - 18ms/step
Epoch 63/300
10/10 - 0s - loss: 1.4816 - val_loss: 1.5711 - 176ms/epoch - 18ms/step
Epoch 64/300
10/10 - 0s - loss: 1.4735 - val_loss: 1.4988 - 181ms/epoch - 18ms/step
Epoch 65/300
10/10 - 0s - loss: 1.4443 - val_loss: 1.4850 - 185ms/epoch - 19ms/step
Epoch 66/300
10/10 - 0s - loss: 1.4441 - val_loss: 1.5275 - 179ms/epoch - 18ms/step
Epoch 67/300
10/10 - 0s - loss: 1.4751 - val_loss: 1.5191 - 177ms/epoch - 18ms/step
Epoch 68/300
10/10 - 0s - loss: 1.4874 - val_loss: 1.4888 - 175ms/epoch - 18ms/step
Epoch 69/300
10/10 - 0s - loss: 1.4442 - val_loss: 1.5044 - 167ms/epoch - 17ms/step
Epoch 70/300
10/10 - 0s - loss: 1.4645 - val_loss: 1.4801 - 174ms/epoch - 17ms/step
Epoch 71/300
10/10 - 0s - loss: 1.4648 - val_loss: 1.5016 - 174ms/epoch - 17ms/step
Epoch 72/300
10/10 - 0s - loss: 1.4336 - val_loss: 1.4970 - 171ms/epoch - 17ms/step
Epoch 73/300
10/10 - 0s - loss: 1.4852 - val_loss: 1.4561 - 176ms/epoch - 18ms/step
Epoch 74/300
10/10 - 0s - loss: 1.4656 - val_loss: 1.5156 - 167ms/epoch - 17ms/step
Epoch 75/300
10/10 - 0s - loss: 1.4359 - val_loss: 1.4154 - 175ms/epoch - 17ms/step
Epoch 76/300
10/10 - 0s - loss: 1.5187 - val_loss: 1.5395 - 182ms/epoch - 18ms/step
Epoch 77/300
10/10 - 0s - loss: 1.5554 - val_loss: 1.5524 - 179ms/epoch - 18ms/step
Epoch 78/300
10/10 - 0s - loss: 1.4679 - val_loss: 1.4742 - 175ms/epoch - 18ms/step
Epoch 79/300
10/10 - 0s - loss: 1.4433 - val_loss: 1.5862 - 177ms/epoch - 18ms/step
Epoch 80/300
10/10 - 0s - loss: 1.4775 - val_loss: 1.5030 - 189ms/epoch - 19ms/step
Epoch 81/300
10/10 - 0s - loss: 1.4020 - val_loss: 1.5264 - 169ms/epoch - 17ms/step
Epoch 82/300
10/10 - 0s - loss: 1.4298 - val_loss: 1.4841 - 170ms/epoch - 17ms/step
Epoch 83/300
10/10 - 0s - loss: 1.4329 - val_loss: 1.3966 - 177ms/epoch - 18ms/step
Epoch 84/300
10/10 - 0s - loss: 1.4106 - val_loss: 1.4472 - 183ms/epoch - 18ms/step
Epoch 85/300
10/10 - 0s - loss: 1.3902 - val_loss: 1.5917 - 174ms/epoch - 17ms/step
Epoch 86/300
10/10 - 0s - loss: 1.6401 - val_loss: 1.6188 - 181ms/epoch - 18ms/step
Epoch 87/300
10/10 - 0s - loss: 1.5748 - val_loss: 1.5913 - 177ms/epoch - 18ms/step
Epoch 88/300
10/10 - 0s - loss: 1.5449 - val_loss: 1.5437 - 185ms/epoch - 19ms/step
Epoch 89/300
10/10 - 0s - loss: 1.4769 - val_loss: 1.5344 - 185ms/epoch - 19ms/step
Epoch 90/300
10/10 - 0s - loss: 1.4805 - val_loss: 1.4814 - 173ms/epoch - 17ms/step
Epoch 91/300
10/10 - 0s - loss: 1.4540 - val_loss: 1.5087 - 170ms/epoch - 17ms/step
Epoch 92/300
10/10 - 0s - loss: 1.4266 - val_loss: 1.4554 - 169ms/epoch - 17ms/step
Epoch 93/300
10/10 - 0s - loss: 1.4014 - val_loss: 1.4492 - 185ms/epoch - 19ms/step
Epoch 94/300
10/10 - 0s - loss: 1.3701 - val_loss: 1.3875 - 182ms/epoch - 18ms/step
Epoch 95/300
10/10 - 0s - loss: 1.3792 - val_loss: 1.4288 - 193ms/epoch - 19ms/step
Epoch 96/300
10/10 - 0s - loss: 1.3813 - val_loss: 1.4452 - 180ms/epoch - 18ms/step
Epoch 97/300
10/10 - 0s - loss: 1.3505 - val_loss: 1.3954 - 173ms/epoch - 17ms/step
Epoch 98/300
10/10 - 0s - loss: 1.3870 - val_loss: 1.6328 - 178ms/epoch - 18ms/step
Epoch 99/300
10/10 - 0s - loss: 1.5100 - val_loss: 1.5139 - 174ms/epoch - 17ms/step
Epoch 100/300
10/10 - 0s - loss: 1.4355 - val_loss: 1.4654 - 176ms/epoch - 18ms/step
Epoch 101/300
10/10 - 0s - loss: 1.3967 - val_loss: 1.4168 - 156ms/epoch - 16ms/step
Epoch 102/300
10/10 - 0s - loss: 1.3466 - val_loss: 1.3765 - 164ms/epoch - 16ms/step
Epoch 103/300
10/10 - 0s - loss: 1.3188 - val_loss: 1.3783 - 182ms/epoch - 18ms/step
Epoch 104/300
10/10 - 0s - loss: 1.3659 - val_loss: 1.4572 - 190ms/epoch - 19ms/step
Epoch 105/300
10/10 - 0s - loss: 1.6089 - val_loss: 1.6353 - 184ms/epoch - 18ms/step
Epoch 106/300
10/10 - 0s - loss: 1.6317 - val_loss: 1.6007 - 171ms/epoch - 17ms/step
Epoch 107/300
10/10 - 0s - loss: 1.5652 - val_loss: 1.5409 - 184ms/epoch - 18ms/step
Epoch 108/300
10/10 - 0s - loss: 1.5273 - val_loss: 1.5030 - 165ms/epoch - 17ms/step
Epoch 109/300
10/10 - 0s - loss: 1.4750 - val_loss: 1.4796 - 179ms/epoch - 18ms/step
Epoch 110/300
10/10 - 0s - loss: 1.4710 - val_loss: 1.4996 - 175ms/epoch - 18ms/step
Epoch 111/300
10/10 - 0s - loss: 1.4805 - val_loss: 1.5006 - 179ms/epoch - 18ms/step
Epoch 112/300
10/10 - 0s - loss: 1.4518 - val_loss: 1.5023 - 184ms/epoch - 18ms/step
Epoch 113/300
10/10 - 0s - loss: 1.4313 - val_loss: 1.4234 - 162ms/epoch - 16ms/step
Epoch 114/300
10/10 - 0s - loss: 1.4113 - val_loss: 1.4629 - 178ms/epoch - 18ms/step
Epoch 115/300
10/10 - 0s - loss: 1.3999 - val_loss: 1.4300 - 170ms/epoch - 17ms/step
Epoch 116/300
10/10 - 0s - loss: 1.3886 - val_loss: 1.4042 - 179ms/epoch - 18ms/step
Epoch 117/300
10/10 - 0s - loss: 1.3659 - val_loss: 1.4245 - 182ms/epoch - 18ms/step
Epoch 118/300
10/10 - 0s - loss: 1.3605 - val_loss: 1.4482 - 162ms/epoch - 16ms/step
Epoch 119/300
10/10 - 0s - loss: 1.4003 - val_loss: 1.4756 - 163ms/epoch - 16ms/step
Epoch 120/300
10/10 - 0s - loss: 1.3749 - val_loss: 1.4237 - 189ms/epoch - 19ms/step
Epoch 121/300
10/10 - 0s - loss: 1.3323 - val_loss: 1.3580 - 189ms/epoch - 19ms/step
Epoch 122/300
10/10 - 0s - loss: 1.3464 - val_loss: 1.3684 - 187ms/epoch - 19ms/step
Epoch 123/300
10/10 - 0s - loss: 1.3430 - val_loss: 1.3345 - 183ms/epoch - 18ms/step
Epoch 124/300
10/10 - 0s - loss: 1.3402 - val_loss: 1.4077 - 183ms/epoch - 18ms/step
Epoch 125/300
10/10 - 0s - loss: 1.3861 - val_loss: 1.4208 - 165ms/epoch - 17ms/step
Epoch 126/300
10/10 - 0s - loss: 1.3665 - val_loss: 1.4796 - 163ms/epoch - 16ms/step
Epoch 127/300
10/10 - 0s - loss: 1.3912 - val_loss: 1.4770 - 169ms/epoch - 17ms/step
Epoch 128/300
10/10 - 0s - loss: 1.4114 - val_loss: 1.4261 - 166ms/epoch - 17ms/step
Epoch 129/300
10/10 - 0s - loss: 1.3687 - val_loss: 1.4488 - 165ms/epoch - 17ms/step
Epoch 130/300
10/10 - 0s - loss: 1.3576 - val_loss: 1.4333 - 173ms/epoch - 17ms/step
Epoch 131/300
10/10 - 0s - loss: 1.3413 - val_loss: 1.4298 - 180ms/epoch - 18ms/step
Epoch 132/300
10/10 - 0s - loss: 1.3331 - val_loss: 1.4388 - 190ms/epoch - 19ms/step
Epoch 133/300
10/10 - 0s - loss: 1.5913 - val_loss: 1.5962 - 188ms/epoch - 19ms/step
Epoch 134/300
10/10 - 0s - loss: 1.6076 - val_loss: 1.5921 - 179ms/epoch - 18ms/step
Epoch 135/300
10/10 - 0s - loss: 1.5387 - val_loss: 1.5856 - 183ms/epoch - 18ms/step
Epoch 136/300
10/10 - 0s - loss: 1.5088 - val_loss: 1.5209 - 159ms/epoch - 16ms/step
Epoch 137/300
10/10 - 0s - loss: 1.4640 - val_loss: 1.4599 - 175ms/epoch - 18ms/step
Epoch 138/300
10/10 - 0s - loss: 1.4140 - val_loss: 1.4659 - 177ms/epoch - 18ms/step
Epoch 139/300
10/10 - 0s - loss: 1.4138 - val_loss: 1.4327 - 179ms/epoch - 18ms/step
Epoch 140/300
10/10 - 0s - loss: 1.3911 - val_loss: 1.4366 - 178ms/epoch - 18ms/step
Epoch 141/300
10/10 - 0s - loss: 1.3870 - val_loss: 1.3962 - 182ms/epoch - 18ms/step
Epoch 142/300
10/10 - 0s - loss: 1.3699 - val_loss: 1.4742 - 154ms/epoch - 15ms/step
Epoch 143/300
10/10 - 0s - loss: 1.3630 - val_loss: 1.3963 - 158ms/epoch - 16ms/step
Epoch 144/300
10/10 - 0s - loss: 1.3818 - val_loss: 1.4538 - 184ms/epoch - 18ms/step
Epoch 145/300
10/10 - 0s - loss: 1.3564 - val_loss: 1.4057 - 182ms/epoch - 18ms/step
Epoch 146/300
10/10 - 0s - loss: 1.3353 - val_loss: 1.4064 - 186ms/epoch - 19ms/step
Epoch 147/300
10/10 - 0s - loss: 1.3285 - val_loss: 1.3835 - 172ms/epoch - 17ms/step
Epoch 148/300
10/10 - 0s - loss: 1.3100 - val_loss: 1.3817 - 188ms/epoch - 19ms/step
Epoch 149/300
10/10 - 0s - loss: 1.3761 - val_loss: 1.4566 - 189ms/epoch - 19ms/step
Epoch 150/300
10/10 - 0s - loss: 1.3473 - val_loss: 1.4378 - 188ms/epoch - 19ms/step
Epoch 151/300
10/10 - 0s - loss: 1.3106 - val_loss: 1.3616 - 182ms/epoch - 18ms/step
Epoch 152/300
10/10 - 0s - loss: 1.3239 - val_loss: 1.3468 - 177ms/epoch - 18ms/step
Epoch 153/300
10/10 - 0s - loss: 1.2947 - val_loss: 1.3523 - 172ms/epoch - 17ms/step
Epoch 154/300
10/10 - 0s - loss: 1.2850 - val_loss: 1.3530 - 170ms/epoch - 17ms/step
Epoch 155/300
10/10 - 0s - loss: 1.2834 - val_loss: 1.3878 - 171ms/epoch - 17ms/step
Epoch 156/300
10/10 - 0s - loss: 1.3192 - val_loss: 1.3747 - 179ms/epoch - 18ms/step
Epoch 157/300
10/10 - 0s - loss: 1.3567 - val_loss: 1.4031 - 174ms/epoch - 17ms/step
Epoch 158/300
10/10 - 0s - loss: 1.3240 - val_loss: 1.3735 - 167ms/epoch - 17ms/step
Epoch 159/300
10/10 - 0s - loss: 1.3272 - val_loss: 1.4563 - 183ms/epoch - 18ms/step
Epoch 160/300
10/10 - 0s - loss: 1.3329 - val_loss: 1.3321 - 179ms/epoch - 18ms/step
Epoch 161/300
10/10 - 0s - loss: 1.3120 - val_loss: 1.3779 - 185ms/epoch - 19ms/step
Epoch 162/300
10/10 - 0s - loss: 1.3093 - val_loss: 1.3739 - 191ms/epoch - 19ms/step
Epoch 163/300
10/10 - 0s - loss: 1.3251 - val_loss: 1.4781 - 182ms/epoch - 18ms/step
Epoch 164/300
10/10 - 0s - loss: 1.3762 - val_loss: 1.4035 - 165ms/epoch - 17ms/step
Epoch 165/300
10/10 - 0s - loss: 1.3655 - val_loss: 1.3693 - 189ms/epoch - 19ms/step
Epoch 166/300
10/10 - 0s - loss: 1.3453 - val_loss: 1.3694 - 170ms/epoch - 17ms/step
Epoch 167/300
10/10 - 0s - loss: 1.3019 - val_loss: 1.3496 - 180ms/epoch - 18ms/step
Epoch 168/300
10/10 - 0s - loss: 1.2801 - val_loss: 1.3375 - 190ms/epoch - 19ms/step
Epoch 169/300
10/10 - 0s - loss: 1.2966 - val_loss: 1.3712 - 178ms/epoch - 18ms/step
Epoch 170/300
10/10 - 0s - loss: 1.3060 - val_loss: 1.3237 - 177ms/epoch - 18ms/step
Epoch 171/300
10/10 - 0s - loss: 1.3299 - val_loss: 1.5022 - 177ms/epoch - 18ms/step
Epoch 172/300
10/10 - 0s - loss: 1.3665 - val_loss: 1.4224 - 186ms/epoch - 19ms/step
Epoch 173/300
10/10 - 0s - loss: 1.3432 - val_loss: 1.5198 - 172ms/epoch - 17ms/step
Epoch 174/300
10/10 - 0s - loss: 1.3434 - val_loss: 1.4113 - 188ms/epoch - 19ms/step
Epoch 175/300
10/10 - 0s - loss: 1.3016 - val_loss: 1.3920 - 175ms/epoch - 18ms/step
Epoch 176/300
10/10 - 0s - loss: 1.2833 - val_loss: 1.4342 - 166ms/epoch - 17ms/step
Epoch 177/300
10/10 - 0s - loss: 1.3334 - val_loss: 1.4225 - 178ms/epoch - 18ms/step
Epoch 178/300
10/10 - 0s - loss: 1.4085 - val_loss: 1.4848 - 170ms/epoch - 17ms/step
Epoch 179/300
10/10 - 0s - loss: 1.4262 - val_loss: 1.5149 - 176ms/epoch - 18ms/step
Epoch 180/300
10/10 - 0s - loss: 1.4076 - val_loss: 1.5736 - 175ms/epoch - 18ms/step
Epoch 181/300
10/10 - 0s - loss: 1.5085 - val_loss: 1.6339 - 179ms/epoch - 18ms/step
Epoch 182/300
10/10 - 0s - loss: 1.5028 - val_loss: 1.5327 - 179ms/epoch - 18ms/step
Epoch 183/300
10/10 - 0s - loss: 1.4710 - val_loss: 1.4611 - 196ms/epoch - 20ms/step
Epoch 184/300
10/10 - 0s - loss: 1.3950 - val_loss: 1.4205 - 190ms/epoch - 19ms/step
Epoch 185/300
10/10 - 0s - loss: 1.3815 - val_loss: 1.4100 - 187ms/epoch - 19ms/step
Epoch 186/300
10/10 - 0s - loss: 1.3722 - val_loss: 1.3939 - 163ms/epoch - 16ms/step
Epoch 187/300
10/10 - 0s - loss: 1.3379 - val_loss: 1.3922 - 194ms/epoch - 19ms/step
Epoch 188/300
10/10 - 0s - loss: 1.3406 - val_loss: 1.3874 - 189ms/epoch - 19ms/step
Epoch 189/300
10/10 - 0s - loss: 1.4787 - val_loss: 1.5603 - 190ms/epoch - 19ms/step
Epoch 190/300
10/10 - 0s - loss: 1.4652 - val_loss: 1.4490 - 163ms/epoch - 16ms/step
Epoch 191/300
10/10 - 0s - loss: 1.3868 - val_loss: 1.4725 - 179ms/epoch - 18ms/step
Epoch 192/300
10/10 - 0s - loss: 1.3470 - val_loss: 1.4088 - 186ms/epoch - 19ms/step
Epoch 193/300
10/10 - 0s - loss: 1.3576 - val_loss: 1.3549 - 193ms/epoch - 19ms/step
Epoch 194/300
10/10 - 0s - loss: 1.3574 - val_loss: 1.4884 - 188ms/epoch - 19ms/step
Epoch 195/300
10/10 - 0s - loss: 1.4376 - val_loss: 1.4794 - 172ms/epoch - 17ms/step
Epoch 196/300
10/10 - 0s - loss: 1.4110 - val_loss: 1.5064 - 175ms/epoch - 18ms/step
Epoch 197/300
10/10 - 0s - loss: 1.3597 - val_loss: 1.3742 - 159ms/epoch - 16ms/step
Epoch 198/300
10/10 - 0s - loss: 1.3897 - val_loss: 1.4465 - 188ms/epoch - 19ms/step
Epoch 199/300
10/10 - 0s - loss: 1.3710 - val_loss: 1.3469 - 175ms/epoch - 18ms/step
Epoch 200/300
10/10 - 0s - loss: 1.3613 - val_loss: 1.4129 - 183ms/epoch - 18ms/step
Epoch 201/300
10/10 - 0s - loss: 1.3581 - val_loss: 1.4100 - 189ms/epoch - 19ms/step
Epoch 202/300
10/10 - 0s - loss: 1.3047 - val_loss: 1.3460 - 164ms/epoch - 16ms/step
Epoch 203/300
10/10 - 0s - loss: 1.3133 - val_loss: 1.3942 - 185ms/epoch - 18ms/step
Epoch 204/300
10/10 - 0s - loss: 1.3880 - val_loss: 1.4730 - 179ms/epoch - 18ms/step
Epoch 205/300
10/10 - 0s - loss: 1.4233 - val_loss: 1.5020 - 196ms/epoch - 20ms/step
Epoch 206/300
10/10 - 0s - loss: 1.3696 - val_loss: 1.4541 - 188ms/epoch - 19ms/step
Epoch 207/300
10/10 - 0s - loss: 1.3189 - val_loss: 1.4825 - 181ms/epoch - 18ms/step
Epoch 208/300
10/10 - 0s - loss: 1.7335 - val_loss: 1.7628 - 170ms/epoch - 17ms/step
Epoch 209/300
10/10 - 0s - loss: 1.6927 - val_loss: 1.6906 - 180ms/epoch - 18ms/step
Epoch 210/300
10/10 - 0s - loss: 1.6293 - val_loss: 1.6065 - 191ms/epoch - 19ms/step
Epoch 211/300
10/10 - 0s - loss: 1.5564 - val_loss: 1.5873 - 179ms/epoch - 18ms/step
Epoch 212/300
10/10 - 0s - loss: 1.5258 - val_loss: 1.5561 - 188ms/epoch - 19ms/step
Epoch 213/300
10/10 - 0s - loss: 1.4918 - val_loss: 1.5715 - 175ms/epoch - 17ms/step
Epoch 214/300
10/10 - 0s - loss: 1.4800 - val_loss: 1.5373 - 166ms/epoch - 17ms/step
Epoch 215/300
10/10 - 0s - loss: 1.4669 - val_loss: 1.5262 - 171ms/epoch - 17ms/step
Epoch 216/300
10/10 - 0s - loss: 1.4492 - val_loss: 1.4965 - 168ms/epoch - 17ms/step
Epoch 217/300
10/10 - 0s - loss: 1.4169 - val_loss: 1.4874 - 160ms/epoch - 16ms/step
Epoch 218/300
10/10 - 0s - loss: 1.4192 - val_loss: 1.4848 - 175ms/epoch - 18ms/step
Epoch 219/300
10/10 - 0s - loss: 1.4072 - val_loss: 1.4776 - 167ms/epoch - 17ms/step
Epoch 220/300
10/10 - 0s - loss: 1.3936 - val_loss: 1.4824 - 163ms/epoch - 16ms/step
Epoch 221/300
10/10 - 0s - loss: 1.3813 - val_loss: 1.4814 - 190ms/epoch - 19ms/step
Epoch 222/300
10/10 - 0s - loss: 1.3821 - val_loss: 1.4344 - 192ms/epoch - 19ms/step
Epoch 223/300
10/10 - 0s - loss: 1.3724 - val_loss: 1.4691 - 197ms/epoch - 20ms/step
Epoch 224/300
10/10 - 0s - loss: 1.3818 - val_loss: 1.4371 - 186ms/epoch - 19ms/step
Epoch 225/300
10/10 - 0s - loss: 1.3986 - val_loss: 1.4602 - 174ms/epoch - 17ms/step
Epoch 226/300
10/10 - 0s - loss: 1.3620 - val_loss: 1.4268 - 162ms/epoch - 16ms/step
Epoch 227/300
10/10 - 0s - loss: 1.3658 - val_loss: 1.5127 - 162ms/epoch - 16ms/step
Epoch 228/300
10/10 - 0s - loss: 1.3994 - val_loss: 1.4251 - 182ms/epoch - 18ms/step
Epoch 229/300
10/10 - 0s - loss: 1.3674 - val_loss: 1.4542 - 181ms/epoch - 18ms/step
Epoch 230/300
10/10 - 0s - loss: 1.3453 - val_loss: 1.4165 - 178ms/epoch - 18ms/step
Epoch 231/300
10/10 - 0s - loss: 1.3473 - val_loss: 1.4112 - 185ms/epoch - 19ms/step
Epoch 232/300
10/10 - 0s - loss: 1.3373 - val_loss: 1.3559 - 193ms/epoch - 19ms/step
Epoch 233/300
10/10 - 0s - loss: 1.3267 - val_loss: 1.4230 - 185ms/epoch - 19ms/step
Epoch 234/300
10/10 - 0s - loss: 1.4402 - val_loss: 1.5016 - 194ms/epoch - 19ms/step
Epoch 235/300
10/10 - 0s - loss: 1.4497 - val_loss: 1.5198 - 182ms/epoch - 18ms/step
Epoch 236/300
10/10 - 0s - loss: 1.3724 - val_loss: 1.4116 - 174ms/epoch - 17ms/step
Epoch 237/300
10/10 - 0s - loss: 1.3275 - val_loss: 1.4120 - 190ms/epoch - 19ms/step
Epoch 238/300
10/10 - 0s - loss: 1.4089 - val_loss: 1.4978 - 180ms/epoch - 18ms/step
Epoch 239/300
10/10 - 0s - loss: 1.4203 - val_loss: 1.4340 - 197ms/epoch - 20ms/step
Epoch 240/300
10/10 - 0s - loss: 1.4002 - val_loss: 1.4535 - 181ms/epoch - 18ms/step
Epoch 241/300
10/10 - 0s - loss: 1.3915 - val_loss: 1.4112 - 179ms/epoch - 18ms/step
Epoch 242/300
10/10 - 0s - loss: 1.4050 - val_loss: 1.4437 - 173ms/epoch - 17ms/step
Epoch 243/300
10/10 - 0s - loss: 1.3834 - val_loss: 1.3841 - 183ms/epoch - 18ms/step
Epoch 244/300
10/10 - 0s - loss: 1.3550 - val_loss: 1.4028 - 185ms/epoch - 19ms/step
Epoch 245/300
10/10 - 0s - loss: 1.3415 - val_loss: 1.4119 - 200ms/epoch - 20ms/step
Epoch 246/300
10/10 - 0s - loss: 1.3579 - val_loss: 1.4416 - 188ms/epoch - 19ms/step
Epoch 247/300
10/10 - 0s - loss: 1.3397 - val_loss: 1.4257 - 173ms/epoch - 17ms/step
Epoch 248/300
10/10 - 0s - loss: 1.3353 - val_loss: 1.3809 - 188ms/epoch - 19ms/step
Epoch 249/300
10/10 - 0s - loss: 1.3211 - val_loss: 1.3619 - 169ms/epoch - 17ms/step
Epoch 250/300
10/10 - 0s - loss: 1.3052 - val_loss: 1.3735 - 168ms/epoch - 17ms/step
Epoch 251/300
10/10 - 0s - loss: 1.3121 - val_loss: 1.3636 - 183ms/epoch - 18ms/step
Epoch 252/300
10/10 - 0s - loss: 1.3121 - val_loss: 1.3741 - 177ms/epoch - 18ms/step
Epoch 253/300
10/10 - 0s - loss: 1.3108 - val_loss: 1.3680 - 168ms/epoch - 17ms/step
Epoch 254/300
10/10 - 0s - loss: 1.3188 - val_loss: 1.4326 - 184ms/epoch - 18ms/step
Epoch 255/300
10/10 - 0s - loss: 1.3111 - val_loss: 1.3853 - 183ms/epoch - 18ms/step
Epoch 256/300
10/10 - 0s - loss: 1.3036 - val_loss: 1.4108 - 195ms/epoch - 20ms/step
Epoch 257/300
10/10 - 0s - loss: 1.2867 - val_loss: 1.3785 - 183ms/epoch - 18ms/step
Epoch 258/300
10/10 - 0s - loss: 1.2768 - val_loss: 1.3614 - 165ms/epoch - 17ms/step
Epoch 259/300
10/10 - 0s - loss: 1.3092 - val_loss: 1.3846 - 176ms/epoch - 18ms/step
Epoch 260/300
10/10 - 0s - loss: 1.2845 - val_loss: 1.3970 - 169ms/epoch - 17ms/step
Epoch 261/300
10/10 - 0s - loss: 1.3381 - val_loss: 1.3931 - 175ms/epoch - 18ms/step
Epoch 262/300
10/10 - 0s - loss: 1.3067 - val_loss: 1.3953 - 176ms/epoch - 18ms/step
Epoch 263/300
10/10 - 0s - loss: 1.2947 - val_loss: 1.3783 - 170ms/epoch - 17ms/step
Epoch 264/300
10/10 - 0s - loss: 1.2947 - val_loss: 1.3805 - 187ms/epoch - 19ms/step
Epoch 265/300
10/10 - 0s - loss: 1.3187 - val_loss: 1.3418 - 187ms/epoch - 19ms/step
Epoch 266/300
10/10 - 0s - loss: 1.2830 - val_loss: 1.4077 - 197ms/epoch - 20ms/step
Epoch 267/300
10/10 - 0s - loss: 1.3008 - val_loss: 1.3461 - 198ms/epoch - 20ms/step
Epoch 268/300
10/10 - 0s - loss: 1.3230 - val_loss: 1.3495 - 183ms/epoch - 18ms/step
Epoch 269/300
10/10 - 0s - loss: 1.3171 - val_loss: 1.3547 - 182ms/epoch - 18ms/step
Epoch 270/300
10/10 - 0s - loss: 1.3216 - val_loss: 1.4041 - 191ms/epoch - 19ms/step
Epoch 271/300
10/10 - 0s - loss: 1.3147 - val_loss: 1.4394 - 182ms/epoch - 18ms/step
Epoch 272/300
10/10 - 0s - loss: 1.3062 - val_loss: 1.4410 - 196ms/epoch - 20ms/step
Epoch 273/300
10/10 - 0s - loss: 1.3154 - val_loss: 1.4076 - 166ms/epoch - 17ms/step
Epoch 274/300
10/10 - 0s - loss: 1.2999 - val_loss: 1.3703 - 161ms/epoch - 16ms/step
Epoch 275/300
10/10 - 0s - loss: 1.2730 - val_loss: 1.3523 - 179ms/epoch - 18ms/step
Epoch 276/300
10/10 - 0s - loss: 1.2773 - val_loss: 1.3488 - 188ms/epoch - 19ms/step
Epoch 277/300
10/10 - 0s - loss: 1.3017 - val_loss: 1.3812 - 184ms/epoch - 18ms/step
Epoch 278/300
10/10 - 0s - loss: 1.2857 - val_loss: 1.4040 - 184ms/epoch - 18ms/step
Epoch 279/300
10/10 - 0s - loss: 1.3243 - val_loss: 1.3774 - 181ms/epoch - 18ms/step
Epoch 280/300
10/10 - 0s - loss: 1.3258 - val_loss: 1.4166 - 161ms/epoch - 16ms/step
Epoch 281/300
10/10 - 0s - loss: 1.3004 - val_loss: 1.3956 - 179ms/epoch - 18ms/step
Epoch 282/300
10/10 - 0s - loss: 1.3407 - val_loss: 1.3529 - 182ms/epoch - 18ms/step
Epoch 283/300
10/10 - 0s - loss: 1.3269 - val_loss: 1.3986 - 183ms/epoch - 18ms/step
Epoch 284/300
10/10 - 0s - loss: 1.3138 - val_loss: 1.4302 - 187ms/epoch - 19ms/step
Epoch 285/300
10/10 - 0s - loss: 1.2999 - val_loss: 1.3942 - 167ms/epoch - 17ms/step
Epoch 286/300
10/10 - 0s - loss: 1.2871 - val_loss: 1.4190 - 161ms/epoch - 16ms/step
Epoch 287/300
10/10 - 0s - loss: 1.3094 - val_loss: 1.3905 - 176ms/epoch - 18ms/step
Epoch 288/300
10/10 - 0s - loss: 1.3072 - val_loss: 1.3681 - 168ms/epoch - 17ms/step
Epoch 289/300
10/10 - 0s - loss: 1.2890 - val_loss: 1.3863 - 190ms/epoch - 19ms/step
Epoch 290/300
10/10 - 0s - loss: 1.2861 - val_loss: 1.4039 - 183ms/epoch - 18ms/step
Epoch 291/300
10/10 - 0s - loss: 1.2845 - val_loss: 1.4018 - 162ms/epoch - 16ms/step
Epoch 292/300
10/10 - 0s - loss: 1.2747 - val_loss: 1.4085 - 184ms/epoch - 18ms/step
Epoch 293/300
10/10 - 0s - loss: 1.2728 - val_loss: 1.3846 - 185ms/epoch - 19ms/step
Epoch 294/300
10/10 - 0s - loss: 1.2567 - val_loss: 1.3465 - 180ms/epoch - 18ms/step
Epoch 295/300
10/10 - 0s - loss: 1.2643 - val_loss: 1.3914 - 195ms/epoch - 20ms/step
Epoch 296/300
10/10 - 0s - loss: 1.2747 - val_loss: 1.4068 - 182ms/epoch - 18ms/step
Epoch 297/300
10/10 - 0s - loss: 1.3311 - val_loss: 1.5587 - 169ms/epoch - 17ms/step
Epoch 298/300
10/10 - 0s - loss: 1.3347 - val_loss: 1.4132 - 181ms/epoch - 18ms/step
Epoch 299/300
10/10 - 0s - loss: 1.3485 - val_loss: 1.4953 - 200ms/epoch - 20ms/step
Epoch 300/300
10/10 - 0s - loss: 1.3156 - val_loss: 1.4378 - 203ms/epoch - 20ms/step
```
</div>
---
## Performance evaluation
```python
plt.figure(figsize=(15, 10))
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.legend(["train", "validation"], loc="upper right")
plt.ylabel("loss")
plt.xlabel("epoch")
# From data to latent space.
z, _ = model(normalized_data)
# From latent space to data.
samples = model.distribution.sample(3000)
x, _ = model.predict(samples)
f, axes = plt.subplots(2, 2)
f.set_size_inches(20, 15)
axes[0, 0].scatter(normalized_data[:, 0], normalized_data[:, 1], color="r")
axes[0, 0].set(title="Inference data space X", xlabel="x", ylabel="y")
axes[0, 1].scatter(z[:, 0], z[:, 1], color="r")
axes[0, 1].set(title="Inference latent space Z", xlabel="x", ylabel="y")
axes[0, 1].set_xlim([-3.5, 4])
axes[0, 1].set_ylim([-4, 4])
axes[1, 0].scatter(samples[:, 0], samples[:, 1], color="g")
axes[1, 0].set(title="Generated latent space Z", xlabel="x", ylabel="y")
axes[1, 1].scatter(x[:, 0], x[:, 1], color="g")
axes[1, 1].set(title="Generated data space X", xlabel="x", ylabel="y")
axes[1, 1].set_xlim([-2, 2])
axes[1, 1].set_ylim([-2, 2])
```
<div class="k-default-codeblock">
```
94/94 [==============================] - 0s 2ms/step
```
</div>
<div class="k-default-codeblock">
```
(-2.0, 2.0)
```
</div>


| keras-io/examples/generative/md/real_nvp.md/0 | {
"file_path": "keras-io/examples/generative/md/real_nvp.md",
"repo_id": "keras-io",
"token_count": 15325
} | 97 |
"""
Title: Creating TFRecords
Author: [Dimitre Oliveira](https://www.linkedin.com/in/dimitre-oliveira-7a1a0113a/)
Date created: 2021/02/27
Last modified: 2023/12/20
Description: Converting data to the TFRecord format.
Accelerator: GPU
"""
"""
## Introduction
The TFRecord format is a simple format for storing a sequence of binary records.
Converting your data into TFRecord has many advantages, such as:
- **More efficient storage**: the TFRecord data can take up less space than the original
data; it can also be partitioned into multiple files.
- **Fast I/O**: the TFRecord format can be read with parallel I/O operations, which is
useful for [TPUs](https://www.tensorflow.org/guide/tpu) or multiple hosts.
- **Self-contained files**: the TFRecord data can be read from a single source—for
example, the [COCO2017](https://cocodataset.org/) dataset originally stores data in
two folders ("images" and "annotations").
An important use case of the TFRecord data format is training on TPUs. First, TPUs are
fast enough to benefit from optimized I/O operations. In addition, TPUs require
data to be stored remotely (e.g. on Google Cloud Storage) and using the TFRecord format
makes it easier to load the data without batch-downloading.
Performance using the TFRecord format can be further improved if you also use
it with the [tf.data](https://www.tensorflow.org/guide/data) API.
In this example you will learn how to convert data of different types (image, text, and
numeric) into TFRecord.
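As a minimal, self-contained sketch of the round trip (it is independent of the COCO
pipeline below, and the `/tmp/minimal.tfrec` path and feature names are arbitrary),
a record with two scalar features can be written and read back like this:
```python
import tensorflow as tf

# Build one `tf.train.Example` holding an int64 and a float feature.
example = tf.train.Example(
    features=tf.train.Features(
        feature={
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[1])),
            "score": tf.train.Feature(float_list=tf.train.FloatList(value=[0.5])),
        }
    )
)

# Serialize it into a TFRecord file.
with tf.io.TFRecordWriter("/tmp/minimal.tfrec") as writer:
    writer.write(example.SerializeToString())

# Read the raw record back and parse it with a matching feature description.
raw_record = next(iter(tf.data.TFRecordDataset("/tmp/minimal.tfrec")))
parsed = tf.io.parse_single_example(
    raw_record,
    {
        "label": tf.io.FixedLenFeature([], tf.int64),
        "score": tf.io.FixedLenFeature([], tf.float32),
    },
)
print(parsed["label"].numpy(), parsed["score"].numpy())  # 1 0.5
```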
**Reference**
- [TFRecord and tf.train.Example](https://www.tensorflow.org/tutorials/load_data/tfrecord)
## Dependencies
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import json
import pprint
import tensorflow as tf
import matplotlib.pyplot as plt
"""
## Download the COCO2017 dataset
We will be using the [COCO2017](https://cocodataset.org/) dataset, because it has many
different types of features, including images, floating point data, and lists.
It will serve as a good example of how to encode different features into the TFRecord
format.
This dataset has two sets of fields: images and annotation meta-data.
The images are a collection of JPG files and the meta-data are stored in a JSON file
which, according to the [official site](https://cocodataset.org/#format-data),
contains the following properties:
```
id: int,
image_id: int,
category_id: int,
segmentation: RLE or [polygon], object segmentation mask
bbox: [x,y,width,height], object bounding box coordinates
area: float, area of the bounding box
iscrowd: 0 or 1, is single object or a collection
```
"""
root_dir = "datasets"
tfrecords_dir = "tfrecords"
images_dir = os.path.join(root_dir, "val2017")
annotations_dir = os.path.join(root_dir, "annotations")
annotation_file = os.path.join(annotations_dir, "instances_val2017.json")
images_url = "http://images.cocodataset.org/zips/val2017.zip"
annotations_url = (
"http://images.cocodataset.org/annotations/annotations_trainval2017.zip"
)
# Download image files
if not os.path.exists(images_dir):
image_zip = keras.utils.get_file(
"images.zip",
cache_dir=os.path.abspath("."),
origin=images_url,
extract=True,
)
os.remove(image_zip)
# Download caption annotation files
if not os.path.exists(annotations_dir):
annotation_zip = keras.utils.get_file(
"captions.zip",
cache_dir=os.path.abspath("."),
origin=annotations_url,
extract=True,
)
os.remove(annotation_zip)
print("The COCO dataset has been downloaded and extracted successfully.")
with open(annotation_file, "r") as f:
annotations = json.load(f)["annotations"]
print(f"Number of images: {len(annotations)}")
"""
### Contents of the COCO2017 dataset
"""
pprint.pprint(annotations[60])
"""
## Parameters
`num_samples` is the number of data samples in each TFRecord file.
`num_tfrecords` is the total number of TFRecord files that we will create.
"""
num_samples = 4096
num_tfrecords = len(annotations) // num_samples
if len(annotations) % num_samples:
num_tfrecords += 1 # add one record if there are any remaining samples
if not os.path.exists(tfrecords_dir):
os.makedirs(tfrecords_dir) # creating TFRecords output folder
"""
## Define TFRecords helper functions
"""
def image_feature(value):
"""Returns a bytes_list from a string / byte."""
return tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.io.encode_jpeg(value).numpy()])
)
def bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value.encode()]))
def float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def float_feature_list(value):
"""Returns a list of float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def create_example(image, path, example):
feature = {
"image": image_feature(image),
"path": bytes_feature(path),
"area": float_feature(example["area"]),
"bbox": float_feature_list(example["bbox"]),
"category_id": int64_feature(example["category_id"]),
"id": int64_feature(example["id"]),
"image_id": int64_feature(example["image_id"]),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_tfrecord_fn(example):
feature_description = {
"image": tf.io.FixedLenFeature([], tf.string),
"path": tf.io.FixedLenFeature([], tf.string),
"area": tf.io.FixedLenFeature([], tf.float32),
"bbox": tf.io.VarLenFeature(tf.float32),
"category_id": tf.io.FixedLenFeature([], tf.int64),
"id": tf.io.FixedLenFeature([], tf.int64),
"image_id": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, feature_description)
example["image"] = tf.io.decode_jpeg(example["image"], channels=3)
example["bbox"] = tf.sparse.to_dense(example["bbox"])
return example
"""
## Generate data in the TFRecord format
Let's generate the COCO2017 data in the TFRecord format. The format will be
`file_{number}.tfrec` (this is optional, but including the number sequences in the file
names can make counting easier).
"""
for tfrec_num in range(num_tfrecords):
samples = annotations[(tfrec_num * num_samples) : ((tfrec_num + 1) * num_samples)]
with tf.io.TFRecordWriter(
tfrecords_dir + "/file_%.2i-%i.tfrec" % (tfrec_num, len(samples))
) as writer:
for sample in samples:
image_path = f"{images_dir}/{sample['image_id']:012d}.jpg"
image = tf.io.decode_jpeg(tf.io.read_file(image_path))
example = create_example(image, image_path, sample)
writer.write(example.SerializeToString())
"""
## Explore one sample from the generated TFRecord
"""
raw_dataset = tf.data.TFRecordDataset(f"{tfrecords_dir}/file_00-{num_samples}.tfrec")
parsed_dataset = raw_dataset.map(parse_tfrecord_fn)
for features in parsed_dataset.take(1):
for key in features.keys():
if key != "image":
print(f"{key}: {features[key]}")
print(f"Image shape: {features['image'].shape}")
plt.figure(figsize=(7, 7))
plt.imshow(features["image"].numpy())
plt.show()
"""
## Train a simple model using the generated TFRecords
Another advantage of TFRecord is that you are able to add many features to it and later
use only a few of them. In this case, we are going to use only `image` and `category_id`.
"""
"""
## Define dataset helper functions
"""
def prepare_sample(features):
image = keras.ops.image.resize(features["image"], size=(224, 224))
return image, features["category_id"]
def get_dataset(filenames, batch_size):
dataset = (
tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTOTUNE)
.map(parse_tfrecord_fn, num_parallel_calls=AUTOTUNE)
.map(prepare_sample, num_parallel_calls=AUTOTUNE)
.shuffle(batch_size * 10)
.batch(batch_size)
.prefetch(AUTOTUNE)
)
return dataset
train_filenames = tf.io.gfile.glob(f"{tfrecords_dir}/*.tfrec")
batch_size = 32
epochs = 1
steps_per_epoch = 50
AUTOTUNE = tf.data.AUTOTUNE
input_tensor = keras.layers.Input(shape=(224, 224, 3), name="image")
model = keras.applications.EfficientNetB0(
input_tensor=input_tensor, weights=None, classes=91
)
model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
model.fit(
x=get_dataset(train_filenames, batch_size),
epochs=epochs,
steps_per_epoch=steps_per_epoch,
verbose=1,
)
"""
## Conclusion
This example demonstrates that instead of reading images and annotations from different
sources you can have your data coming from a single source thanks to TFRecord.
This process can make storing and reading data simpler and more efficient.
For more information, you can go to the [TFRecord and
tf.train.Example](https://www.tensorflow.org/tutorials/load_data/tfrecord) tutorial.
"""
| keras-io/examples/keras_recipes/creating_tfrecords.py/0 | {
"file_path": "keras-io/examples/keras_recipes/creating_tfrecords.py",
"repo_id": "keras-io",
"token_count": 3372
} | 98 |
"""
Title: How to train a Keras model on TFRecord files
Author: Amy MiHyun Jang
Date created: 2020/07/29
Last modified: 2020/08/07
Description: Loading TFRecords for computer vision models.
Accelerator: TPU
"""
"""
## Introduction + Set Up
TFRecords store a sequence of binary records, read linearly. They are a useful format for
storing data because they can be read efficiently. Learn more about TFRecords
[here](https://www.tensorflow.org/tutorials/load_data/tfrecord).
We'll explore how we can easily load in TFRecords for our melanoma classifier.
"""
import tensorflow as tf
from functools import partial
import matplotlib.pyplot as plt
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
print("Device:", tpu.master())
strategy = tf.distribute.TPUStrategy(tpu)
except:
strategy = tf.distribute.get_strategy()
print("Number of replicas:", strategy.num_replicas_in_sync)
"""
We want a bigger batch size because our data is not balanced: with a larger batch, each training step is more likely to include at least a few malignant (positive) samples.
"""
AUTOTUNE = tf.data.AUTOTUNE
GCS_PATH = "gs://kds-b38ce1b823c3ae623f5691483dbaa0f0363f04b0d6a90b63cf69946e"
BATCH_SIZE = 64
IMAGE_SIZE = [1024, 1024]
"""
## Load the data
"""
FILENAMES = tf.io.gfile.glob(GCS_PATH + "/tfrecords/train*.tfrec")
split_ind = int(0.9 * len(FILENAMES))
TRAINING_FILENAMES, VALID_FILENAMES = FILENAMES[:split_ind], FILENAMES[split_ind:]
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + "/tfrecords/test*.tfrec")
print("Train TFRecord Files:", len(TRAINING_FILENAMES))
print("Validation TFRecord Files:", len(VALID_FILENAMES))
print("Test TFRecord Files:", len(TEST_FILENAMES))
"""
### Decoding the data
The images have to be converted to tensors so that they are valid inputs to our model.
As the images use the RGB color space, we specify 3 channels.
We also reshape our data so that all of the images will be the same shape.
"""
def decode_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
"""
As we load in our data, we need both our `X` and our `Y`. The X is our image; the model
will find features and patterns in our image dataset. We want to predict Y, the
probability that the lesion in the image is malignant. We will go through our TFRecords
and parse out the image and the target values.
"""
def read_tfrecord(example, labeled):
tfrecord_format = (
{
"image": tf.io.FixedLenFeature([], tf.string),
"target": tf.io.FixedLenFeature([], tf.int64),
}
if labeled
else {
"image": tf.io.FixedLenFeature([], tf.string),
}
)
example = tf.io.parse_single_example(example, tfrecord_format)
image = decode_image(example["image"])
if labeled:
label = tf.cast(example["target"], tf.int32)
return image, label
return image
"""
### Define loading methods
Our dataset is not ordered in any meaningful way, so the order can be ignored when
loading our dataset. By ignoring the order and reading files as soon as they come in, it
will take a shorter time to load the data.
"""
def load_dataset(filenames, labeled=True):
ignore_order = tf.data.Options()
ignore_order.experimental_deterministic = False # disable order, increase speed
dataset = tf.data.TFRecordDataset(
filenames
) # automatically interleaves reads from multiple files
dataset = dataset.with_options(
ignore_order
) # uses data as soon as it streams in, rather than in its original order
dataset = dataset.map(
partial(read_tfrecord, labeled=labeled), num_parallel_calls=AUTOTUNE
)
# returns a dataset of (image, label) pairs if labeled=True or just images if labeled=False
return dataset
"""
We define the following function to get our different datasets.
"""
def get_dataset(filenames, labeled=True):
dataset = load_dataset(filenames, labeled=labeled)
dataset = dataset.shuffle(2048)
dataset = dataset.prefetch(buffer_size=AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE)
return dataset
"""
### Visualize input images
"""
train_dataset = get_dataset(TRAINING_FILENAMES)
valid_dataset = get_dataset(VALID_FILENAMES)
test_dataset = get_dataset(TEST_FILENAMES, labeled=False)
image_batch, label_batch = next(iter(train_dataset))
def show_batch(image_batch, label_batch):
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(image_batch[n] / 255.0)
if label_batch[n]:
plt.title("MALIGNANT")
else:
plt.title("BENIGN")
plt.axis("off")
show_batch(image_batch.numpy(), label_batch.numpy())
"""
## Building our model
"""
"""
### Define callbacks
The following learning rate schedule gradually decays the learning rate as training
progresses.
We can use callbacks to stop training when there are no improvements in the model. At the
end of the training process, the model will restore the weights of its best iteration.
"""
initial_learning_rate = 0.01
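# With staircase=True and decay_steps=20, the learning rate is multiplied by
# 0.96 after every 20 optimizer steps (not once per epoch).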
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=20, decay_rate=0.96, staircase=True
)
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(
"melanoma_model.h5", save_best_only=True
)
early_stopping_cb = tf.keras.callbacks.EarlyStopping(
patience=10, restore_best_weights=True
)
"""
### Build our base model
Transfer learning is a great way to reap the benefits of a well-trained model without
having to train the model ourselves. For this notebook, we want to import the Xception
model. A more in-depth analysis of transfer learning can be found
[here](https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/).
We do not want our metric to be `accuracy` because our data is imbalanced. For our
example, we will be looking at the area under the ROC curve (AUC).
"""
def make_model():
base_model = tf.keras.applications.Xception(
input_shape=(*IMAGE_SIZE, 3), include_top=False, weights="imagenet"
)
base_model.trainable = False
inputs = tf.keras.layers.Input([*IMAGE_SIZE, 3])
x = tf.keras.applications.xception.preprocess_input(inputs)
x = base_model(x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(8, activation="relu")(x)
x = tf.keras.layers.Dropout(0.7)(x)
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
loss="binary_crossentropy",
metrics=tf.keras.metrics.AUC(name="auc"),
)
return model
"""
## Train the model
"""
with strategy.scope():
model = make_model()
history = model.fit(
train_dataset,
epochs=2,
validation_data=valid_dataset,
callbacks=[checkpoint_cb, early_stopping_cb],
)
"""
## Predict results
We'll use our model to predict results for our test dataset images. Values closer to `0`
are more likely to be benign and values closer to `1` are more likely to be malignant.
"""
def show_batch_predictions(image_batch):
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(image_batch[n] / 255.0)
img_array = tf.expand_dims(image_batch[n], axis=0)
plt.title(model.predict(img_array)[0])
plt.axis("off")
image_batch = next(iter(test_dataset))
show_batch_predictions(image_batch)
| keras-io/examples/keras_recipes/tfrecord.py/0 | {
"file_path": "keras-io/examples/keras_recipes/tfrecord.py",
"repo_id": "keras-io",
"token_count": 2771
} | 99 |
<jupyter_start><jupyter_text>Large-scale multi-label text classification**Author:** [Sayak Paul](https://twitter.com/RisingSayak), [Soumik Rakshit](https://github.com/soumik12345)**Date created:** 2020/09/25**Last modified:** 2020/12/23**Description:** Implementing a large-scale multi-label text classification model. Introduction In this example, we will build a multi-label text classifier to predict the subject areas of arXiv papers from their abstract bodies. This type of classifier can be useful for conference submission portals like [OpenReview](https://openreview.net/). Given a paper abstract, the portal could provide suggestions for which areas the paper would best belong to. The dataset was collected using the [`arXiv` Python library](https://github.com/lukasschwab/arxiv.py) that provides a wrapper around the [original arXiv API](http://arxiv.org/help/api/index). To learn more about the data collection process, please refer to [this notebook](https://github.com/soumik12345/multi-label-text-classification/blob/master/arxiv_scrape.ipynb). Additionally, you can also find the dataset on [Kaggle](https://www.kaggle.com/spsayakpaul/arxiv-paper-abstracts). Imports<jupyter_code>from tensorflow.keras import layers
from tensorflow import keras
import tensorflow as tf
from sklearn.model_selection import train_test_split
from ast import literal_eval
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np<jupyter_output><empty_output><jupyter_text>Perform exploratory data analysis In this section, we first load the dataset into a `pandas` dataframe and then perform some basic exploratory data analysis (EDA).<jupyter_code>arxiv_data = pd.read_csv(
"https://github.com/soumik12345/multi-label-text-classification/releases/download/v0.2/arxiv_data.csv"
)
arxiv_data.head()<jupyter_output><empty_output><jupyter_text>Our text features are present in the `summaries` column and their corresponding labels are in `terms`. As you can notice, there are multiple categories associated with a particular entry.<jupyter_code>print(f"There are {len(arxiv_data)} rows in the dataset.")<jupyter_output><empty_output><jupyter_text>Real-world data is noisy. One of the most commonly observed sources of noise is data duplication. Here we notice that our initial dataset has about 13k duplicate entries.<jupyter_code>total_duplicate_titles = sum(arxiv_data["titles"].duplicated())
print(f"There are {total_duplicate_titles} duplicate titles.")<jupyter_output><empty_output><jupyter_text>Before proceeding further, we drop these entries.<jupyter_code>arxiv_data = arxiv_data[~arxiv_data["titles"].duplicated()]
print(f"There are {len(arxiv_data)} rows in the deduplicated dataset.")
# There are some terms with occurrence as low as 1.
print(sum(arxiv_data["terms"].value_counts() == 1))
# How many unique terms?
print(arxiv_data["terms"].nunique())<jupyter_output><empty_output><jupyter_text>As observed above, out of 3,157 unique combinations of `terms`, 2,321 entries have thelowest occurrence. To prepare our train, validation, and test sets with[stratification](https://en.wikipedia.org/wiki/Stratified_sampling), we need to dropthese terms.<jupyter_code># Filtering the rare terms.
arxiv_data_filtered = arxiv_data.groupby("terms").filter(lambda x: len(x) > 1)
arxiv_data_filtered.shape<jupyter_output><empty_output><jupyter_text>Convert the string labels to lists of strings The initial labels are represented as raw strings. Here we make them `List[str]` for a more compact representation.<jupyter_code>arxiv_data_filtered["terms"] = arxiv_data_filtered["terms"].apply(
lambda x: literal_eval(x)
)
arxiv_data_filtered["terms"].values[:5]<jupyter_output><empty_output><jupyter_text>Use stratified splits because of class imbalanceThe dataset has a[class imbalance problem](https://developers.google.com/machine-learning/glossary/class-imbalanced-dataset).So, to have a fair evaluation result, we need to ensure the datasets are sampled withstratification. To know more about different strategies to deal with the class imbalanceproblem, you can follow[this tutorial](https://www.tensorflow.org/tutorials/structured_data/imbalanced_data).For an end-to-end demonstration of classification with imbablanced data, refer to[Imbalanced classification: credit card fraud detection](https://keras.io/examples/structured_data/imbalanced_classification/).<jupyter_code>test_split = 0.1
# Initial train and test split.
train_df, test_df = train_test_split(
arxiv_data_filtered,
test_size=test_split,
stratify=arxiv_data_filtered["terms"].values,
)
# Splitting the test set further into validation
# and new test sets.
val_df = test_df.sample(frac=0.5)
test_df.drop(val_df.index, inplace=True)
print(f"Number of rows in training set: {len(train_df)}")
print(f"Number of rows in validation set: {len(val_df)}")
print(f"Number of rows in test set: {len(test_df)}")<jupyter_output><empty_output><jupyter_text>Multi-label binarizationNow we preprocess our labels using the[`StringLookup`](https://keras.io/api/layers/preprocessing_layers/categorical/string_lookup)layer.<jupyter_code>terms = tf.ragged.constant(train_df["terms"].values)
lookup = tf.keras.layers.StringLookup(output_mode="multi_hot")
lookup.adapt(terms)
vocab = lookup.get_vocabulary()
def invert_multi_hot(encoded_labels):
"""Reverse a single multi-hot encoded label to a tuple of vocab terms."""
hot_indices = np.argwhere(encoded_labels == 1.0)[..., 0]
return np.take(vocab, hot_indices)
print("Vocabulary:\n")
print(vocab)<jupyter_output><empty_output><jupyter_text>Here we are separating the individual unique classes available from the label pool and then using this information to represent a given label set with 0's and 1's. Below is an example.<jupyter_code>sample_label = train_df["terms"].iloc[0]
print(f"Original label: {sample_label}")
label_binarized = lookup([sample_label])
print(f"Label-binarized representation: {label_binarized}")<jupyter_output><empty_output><jupyter_text>Data preprocessing and `tf.data.Dataset` objectsWe first get percentile estimates of the sequence lengths. The purpose will be clear in amoment.<jupyter_code>train_df["summaries"].apply(lambda x: len(x.split(" "))).describe()<jupyter_output><empty_output><jupyter_text>Notice that 50% of the abstracts have a length of 154 (you may get a different numberbased on the split). So, any number close to that value is a good enough approximate for themaximum sequence length.Now, we implement utilities to prepare our datasets.<jupyter_code>max_seqlen = 150
batch_size = 128
padding_token = "<pad>"
auto = tf.data.AUTOTUNE
def make_dataset(dataframe, is_train=True):
labels = tf.ragged.constant(dataframe["terms"].values)
label_binarized = lookup(labels).numpy()
dataset = tf.data.Dataset.from_tensor_slices(
(dataframe["summaries"].values, label_binarized)
)
dataset = dataset.shuffle(batch_size * 10) if is_train else dataset
return dataset.batch(batch_size)<jupyter_output><empty_output><jupyter_text>Now we can prepare the `tf.data.Dataset` objects.<jupyter_code>train_dataset = make_dataset(train_df, is_train=True)
validation_dataset = make_dataset(val_df, is_train=False)
test_dataset = make_dataset(test_df, is_train=False)<jupyter_output><empty_output><jupyter_text>Dataset preview<jupyter_code>text_batch, label_batch = next(iter(train_dataset))
for i, text in enumerate(text_batch[:5]):
label = label_batch[i].numpy()[None, ...]
print(f"Abstract: {text}")
print(f"Label(s): {invert_multi_hot(label[0])}")
print(" ")<jupyter_output><empty_output><jupyter_text>VectorizationBefore we feed the data to our model, we need to vectorize it (represent it in a numerical form).For that purpose, we will use the[`TextVectorization` layer](https://keras.io/api/layers/preprocessing_layers/text/text_vectorization).It can operate as a part of your main model so that the model is excluded from the corepreprocessing logic. This greatly reduces the chances of training / serving skew during inference.We first calculate the number of unique words present in the abstracts.<jupyter_code># Source: https://stackoverflow.com/a/18937309/7636462
vocabulary = set()
train_df["summaries"].str.lower().str.split().apply(vocabulary.update)
vocabulary_size = len(vocabulary)
print(vocabulary_size)<jupyter_output><empty_output><jupyter_text>We now create our vectorization layer and `map()` it to the `tf.data.Dataset`s created earlier.<jupyter_code>text_vectorizer = layers.TextVectorization(
max_tokens=vocabulary_size, ngrams=2, output_mode="tf_idf"
)
# `TextVectorization` layer needs to be adapted as per the vocabulary from our
# training set.
with tf.device("/CPU:0"):
text_vectorizer.adapt(train_dataset.map(lambda text, label: text))
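# Optional, illustrative sanity check (the sentence below is made up, not from the
# dataset): a single abstract maps to a dense TF-IDF vector whose length equals
# `vocabulary_size`.
sample_vector = text_vectorizer(
    ["We propose a convolutional neural network for semantic segmentation."]
)
print(sample_vector.shape)  # Expected: (1, vocabulary_size)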
train_dataset = train_dataset.map(
lambda text, label: (text_vectorizer(text), label), num_parallel_calls=auto
).prefetch(auto)
validation_dataset = validation_dataset.map(
lambda text, label: (text_vectorizer(text), label), num_parallel_calls=auto
).prefetch(auto)
test_dataset = test_dataset.map(
lambda text, label: (text_vectorizer(text), label), num_parallel_calls=auto
).prefetch(auto)<jupyter_output><empty_output><jupyter_text>A batch of raw text will first go through the `TextVectorization` layer and it will generate their integer representations. Internally, the `TextVectorization` layer will first create bi-grams out of the sequences and then represent them using [TF-IDF](https://wikipedia.org/wiki/Tf%E2%80%93idf). The output representations will then be passed to the shallow model responsible for text classification. To learn more about other possible configurations with `TextVectorizer`, please consult the [official documentation](https://keras.io/api/layers/preprocessing_layers/text/text_vectorization). **Note**: Setting the `max_tokens` argument to a pre-calculated vocabulary size is not a requirement. Create a text classification model. We will keep our model simple -- it will be a small stack of fully-connected layers with ReLU as the non-linearity.<jupyter_code>def make_model():
shallow_mlp_model = keras.Sequential(
[
layers.Dense(512, activation="relu"),
layers.Dense(256, activation="relu"),
layers.Dense(lookup.vocabulary_size(), activation="sigmoid"),
] # More on why "sigmoid" has been used here in a moment.
)
    return shallow_mlp_model<jupyter_output><empty_output><jupyter_text>Train the model. We will train our model using the binary crossentropy loss. This is because the labels are not disjoint. For a given abstract, we may have multiple categories. So, we will divide the prediction task into a series of multiple binary classification problems. This is also why we kept the activation function of the classification layer in our model to sigmoid. Researchers have used other combinations of loss function and activation function as well. For example, in [Exploring the Limits of Weakly Supervised Pretraining](https://arxiv.org/abs/1805.00932), Mahajan et al. used the softmax activation function and cross-entropy loss to train their models. There are several metrics that can be used in multi-label classification. To keep this code example narrow we decided to use the [binary accuracy metric](https://keras.io/api/metrics/accuracy_metrics/#binaryaccuracy-class). To see the explanation why this metric is used we refer to this [pull-request](https://github.com/keras-team/keras-io/pull/1133#issuecomment-1322736860). There are also other suitable metrics for multi-label classification, like [F1 Score](https://www.tensorflow.org/addons/api_docs/python/tfa/metrics/F1Score) or [Hamming loss](https://www.tensorflow.org/addons/api_docs/python/tfa/metrics/HammingLoss).<jupyter_code>epochs = 20
shallow_mlp_model = make_model()
shallow_mlp_model.compile(
loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"]
)
history = shallow_mlp_model.fit(
train_dataset, validation_data=validation_dataset, epochs=epochs
)
def plot_result(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_result("loss")
plot_result("binary_accuracy")<jupyter_output><empty_output><jupyter_text>While training, we notice an initial sharp fall in the loss followed by a gradual decay. Evaluate the model<jupyter_code>_, binary_acc = shallow_mlp_model.evaluate(test_dataset)
print(f"Categorical accuracy on the test set: {round(binary_acc * 100, 2)}%.")<jupyter_output><empty_output><jupyter_text>The trained model gives us an evaluation accuracy of ~99%. InferenceAn important feature of the[preprocessing layers provided by Keras](https://keras.io/guides/preprocessing_layers/)is that they can be included inside a `tf.keras.Model`. We will export an inference modelby including the `text_vectorization` layer on top of `shallow_mlp_model`. This willallow our inference model to directly operate on raw strings.**Note** that during training it is always preferable to use these preprocessinglayers as a part of the data input pipeline rather than the model to avoidsurfacing bottlenecks for the hardware accelerators. This also allows forasynchronous data processing.<jupyter_code># Create a model for inference.
model_for_inference = keras.Sequential([text_vectorizer, shallow_mlp_model])
# Create a small dataset just for demoing inference.
inference_dataset = make_dataset(test_df.sample(100), is_train=False)
text_batch, label_batch = next(iter(inference_dataset))
predicted_probabilities = model_for_inference.predict(text_batch)
# Perform inference.
for i, text in enumerate(text_batch[:5]):
label = label_batch[i].numpy()[None, ...]
print(f"Abstract: {text}")
print(f"Label(s): {invert_multi_hot(label[0])}")
predicted_proba = [proba for proba in predicted_probabilities[i]]
top_3_labels = [
x
for _, x in sorted(
zip(predicted_probabilities[i], lookup.get_vocabulary()),
key=lambda pair: pair[0],
reverse=True,
)
][:3]
print(f"Predicted Label(s): ({', '.join([label for label in top_3_labels])})")
print(" ")<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/multi_label_classification.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/multi_label_classification.ipynb",
"repo_id": "keras-io",
"token_count": 4679
} | 100 |
<jupyter_start><jupyter_text>Text classification with Transformer**Author:** [Apoorv Nandan](https://twitter.com/NandanApoorv)**Date created:** 2020/05/10**Last modified:** 2024/01/18**Description:** Implement a Transformer block as a Keras layer and use it for text classification. Setup<jupyter_code>import keras
from keras import ops
from keras import layers<jupyter_output><empty_output><jupyter_text>Implement a Transformer block as a layer<jupyter_code>class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super().__init__()
self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
self.ffn = keras.Sequential(
[layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),]
)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, inputs):
attn_output = self.att(inputs, inputs)
attn_output = self.dropout1(attn_output)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output)
        return self.layernorm2(out1 + ffn_output)<jupyter_output><empty_output><jupyter_text>Implement embedding layer. Two separate embedding layers, one for tokens, one for token index (positions).<jupyter_code>class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super().__init__()
self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = ops.shape(x)[-1]
positions = ops.arange(start=0, stop=maxlen, step=1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions<jupyter_output><empty_output><jupyter_text>Download and prepare dataset<jupyter_code>vocab_size = 20000 # Only consider the top 20k words
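# Optional, illustrative shape check (the sizes are hardcoded to match the settings
# used in this example): token ids of shape (batch, maxlen) are embedded to
# (batch, maxlen, embed_dim).
demo_embedding = TokenAndPositionEmbedding(maxlen=200, vocab_size=20000, embed_dim=32)
print(demo_embedding(ops.ones((2, 200), dtype="int32")).shape)  # (2, 200, 32)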
maxlen = 200 # Only consider the first 200 words of each movie review
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
print(len(x_train), "Training sequences")
print(len(x_val), "Validation sequences")
x_train = keras.utils.pad_sequences(x_train, maxlen=maxlen)
x_val = keras.utils.pad_sequences(x_val, maxlen=maxlen)<jupyter_output><empty_output><jupyter_text>Create classifier model using transformer layer. The Transformer layer outputs one vector for each time step of our input sequence. Here, we take the mean across all time steps and use a feed forward network on top of it to classify text.<jupyter_code>embed_dim = 32 # Embedding size for each token
num_heads = 2 # Number of attention heads
ff_dim = 32 # Hidden layer size in feed forward network inside transformer
inputs = layers.Input(shape=(maxlen,))
embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
x = embedding_layer(inputs)
transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)
x = transformer_block(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(20, activation="relu")(x)
x = layers.Dropout(0.1)(x)
outputs = layers.Dense(2, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)<jupyter_output><empty_output><jupyter_text>Train and Evaluate<jupyter_code>model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
history = model.fit(
x_train, y_train, batch_size=32, epochs=2, validation_data=(x_val, y_val)
)<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/text_classification_with_transformer.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/text_classification_with_transformer.ipynb",
"repo_id": "keras-io",
"token_count": 1438
} | 101 |
# MultipleChoice Task with Transfer Learning
**Author:** Md Awsafur Rahman<br>
**Date created:** 2023/09/14<br>
**Last modified:** 2023/09/14<br>
**Description:** Use pre-trained NLP models for the multiple choice task.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/multiple_choice_task_with_transfer_learning.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/multiple_choice_task_with_transfer_learning.py)
---
## Introduction
In this example, we will demonstrate how to perform the **MultipleChoice** task by
fine-tuning a pre-trained DebertaV3 model. In this task, several candidate answers are
provided along with a context, and the model is trained to select the correct answer,
unlike question answering. We will use the SWAG dataset to demonstrate this example.
---
## Setup
```python
import keras_nlp
import keras
import tensorflow as tf # For tf.data only.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
---
## Dataset
In this example, we'll use the **SWAG** dataset for the multiple choice task.
```python
!wget "https://github.com/rowanz/swagaf/archive/refs/heads/master.zip" -O swag.zip
!unzip -q swag.zip
```
<div class="k-default-codeblock">
```
--2023-11-13 20:05:24-- https://github.com/rowanz/swagaf/archive/refs/heads/master.zip
Resolving github.com (github.com)... 192.30.255.113
Connecting to github.com (github.com)|192.30.255.113|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://codeload.github.com/rowanz/swagaf/zip/refs/heads/master [following]
--2023-11-13 20:05:25-- https://codeload.github.com/rowanz/swagaf/zip/refs/heads/master
Resolving codeload.github.com (codeload.github.com)... 20.29.134.24
Connecting to codeload.github.com (codeload.github.com)|20.29.134.24|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: unspecified [application/zip]
Saving to: ‘swag.zip’
```
</div>
<div class="k-default-codeblock">
```
swag.zip [ <=> ] 19.94M 4.25MB/s in 4.7s
```
</div>
<div class="k-default-codeblock">
```
2023-11-13 20:05:30 (4.25 MB/s) - ‘swag.zip’ saved [20905751]
```
</div>
```python
!ls swagaf-master/data
```
<div class="k-default-codeblock">
```
README.md test.csv train.csv train_full.csv val.csv val_full.csv
```
</div>
---
## Configuration
```python
class CFG:
preset = "deberta_v3_extra_small_en" # Name of pretrained models
sequence_length = 200 # Input sequence length
seed = 42 # Random seed
epochs = 5 # Training epochs
batch_size = 8 # Batch size
augment = True # Augmentation (Shuffle Options)
```
---
## Reproducibility
Sets the random seed to produce similar results in each run.
```python
keras.utils.set_random_seed(CFG.seed)
```
---
## Meta Data
* **train.csv** - will be used for training.
* `sent1` and `sent2`: these fields show how a sentence starts, and if you put the two
together, you get the `startphrase` field.
* `ending_<i>`: suggests a possible way the sentence can end, but only one of
them is correct.
* `label`: identifies the correct sentence ending.
* **val.csv** - similar to `train.csv` but will be used for validation.
```python
# Train data
train_df = pd.read_csv(
"swagaf-master/data/train.csv", index_col=0
) # Read CSV file into a DataFrame
train_df = train_df.sample(frac=0.02)
print("# Train Data: {:,}".format(len(train_df)))
# Valid data
valid_df = pd.read_csv(
"swagaf-master/data/val.csv", index_col=0
) # Read CSV file into a DataFrame
valid_df = valid_df.sample(frac=0.02)
print("# Valid Data: {:,}".format(len(valid_df)))
```
<div class="k-default-codeblock">
```
# Train Data: 1,471
# Valid Data: 400
```
</div>
---
## Contextualize Options
Our approach entails furnishing the model with question and answer pairs, as opposed to
employing a single question for all four options. In practice, this signifies that for
the four options, we will supply the model with the same question combined
with each respective answer choice (e.g., `(Q + A)`, `(Q + B)`, and so on). This analogy
draws parallels to the practice of revisiting a question multiple times during an exam to
promote a deeper understanding of the problem at hand.
> Notably, in the context of SWAG dataset, question is the start of a sentence and
options are possible ending of that sentence.
```python
# Define a function to create options based on the prompt and choices
def make_options(row):
row["options"] = [
f"{row.startphrase}\n{row.ending0}", # Option 0
f"{row.startphrase}\n{row.ending1}", # Option 1
f"{row.startphrase}\n{row.ending2}", # Option 2
f"{row.startphrase}\n{row.ending3}",
] # Option 3
return row
```
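As a quick, illustrative check (the row below is made up and not taken from the SWAG data), `make_options` turns a single row into four "startphrase + ending" strings, one per candidate answer:
```python
# Toy row, purely for illustration; `pd` is the pandas alias imported in Setup.
toy_row = pd.Series(
    {
        "startphrase": "She opens the fridge and",
        "ending0": "grabs a bottle of water.",
        "ending1": "starts the car.",
        "ending2": "climbs a mountain.",
        "ending3": "closes the window.",
    }
)
print(make_options(toy_row)["options"])
```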
Apply the `make_options` function to each row of the dataframe
```python
train_df = train_df.apply(make_options, axis=1)
valid_df = valid_df.apply(make_options, axis=1)
```
---
## Preprocessing
**What it does:** The preprocessor takes input strings and transforms them into a
dictionary (`token_ids`, `padding_mask`) containing preprocessed tensors. This process
starts with tokenization, where input strings are converted into sequences of token IDs.
**Why it's important:** Initially, raw text data is complex and challenging for modeling
due to its high dimensionality. By converting text into a compact set of tokens, such as
transforming `"The quick brown fox"` into `["the", "qu", "##ick", "br", "##own", "fox"]`,
we simplify the data. Many models rely on special tokens and additional tensors to
understand input. These tokens help divide input and identify padding, among other tasks.
Making all sequences the same length through padding boosts computational efficiency,
making subsequent steps smoother.
Explore the following pages to access the available preprocessing and tokenizer layers in
**KerasNLP**:
- [Preprocessing](https://keras.io/api/keras_nlp/preprocessing_layers/)
- [Tokenizers](https://keras.io/api/keras_nlp/tokenizers/)
```python
preprocessor = keras_nlp.models.DebertaV3Preprocessor.from_preset(
preset=CFG.preset, # Name of the model
sequence_length=CFG.sequence_length, # Max sequence length, will be padded if shorter
)
```
Now, let's examine what the output shape of the preprocessing layer looks like. The
output shape of the layer can be represented as $(num\_choices, sequence\_length)$.
```python
outs = preprocessor(train_df.options.iloc[0]) # Process options for the first row
# Display the shape of each processed output
for k, v in outs.items():
print(k, ":", v.shape)
```
<div class="k-default-codeblock">
```
CUDA backend failed to initialize: Found CUDA version 12010, but JAX was built against version 12020, which is newer. The copy of CUDA that is installed must be at least as new as the version against which JAX was built. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
token_ids : (4, 200)
padding_mask : (4, 200)
```
</div>
We'll use the `preprocess_fn` function to transform each text option using the
`dataset.map(preprocess_fn)` method.
```python
def preprocess_fn(text, label=None):
text = preprocessor(text) # Preprocess text
return (
(text, label) if label is not None else text
) # Return processed text and label if available
```
---
## Augmentation
In this notebook, we'll experiment with an interesting augmentation technique,
`option_shuffle`. Since we're providing the model with one option at a time, we can
introduce a shuffle to the order of options. For instance, options `[A, C, E, D, B]`
would be rearranged as `[D, B, A, E, C]`. This practice will help the model focus on the
content of the options themselves, rather than being influenced by their positions.
**Note:** Even though the `option_shuffle` function is written in pure
TensorFlow, it can be used with any backend (e.g. JAX, PyTorch) as it is only used
in `tf.data.Dataset` pipeline which is compatible with Keras 3 routines.
```python
def option_shuffle(options, labels, prob=0.50, seed=None):
if tf.random.uniform([]) > prob: # Shuffle probability check
return options, labels
# Shuffle indices of options and labels in the same order
indices = tf.random.shuffle(tf.range(tf.shape(options)[0]), seed=seed)
# Shuffle options and labels
options = tf.gather(options, indices)
labels = tf.gather(labels, indices)
return options, labels
```
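As a small, illustrative sanity check (the strings and label below are dummies, not real dataset rows), we can force a shuffle with `prob=1.0` and confirm that the one-hot label moves together with its option:
```python
dummy_options = tf.constant(["Q + A", "Q + B", "Q + C", "Q + D"])
dummy_label = tf.constant([0.0, 1.0, 0.0, 0.0])  # the correct answer is option B
shuffled_options, shuffled_label = option_shuffle(dummy_options, dummy_label, prob=1.0)
print(shuffled_options.numpy())
print(shuffled_label.numpy())  # the 1.0 still sits next to "Q + B"
```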
In the following function, we'll merge all augmentation functions to apply to the text.
These augmentations will be applied to the data using the `dataset.map(augment_fn)`
approach.
```python
def augment_fn(text, label=None):
text, label = option_shuffle(text, label, prob=0.5) # Shuffle the options
return (text, label) if label is not None else text
```
---
## DataLoader
The code below sets up a robust data flow pipeline using `tf.data.Dataset` for data
processing. Notable aspects of `tf.data` include its ability to simplify pipeline
construction and represent components in sequences.
To learn more about `tf.data`, refer to this
[documentation](https://www.tensorflow.org/guide/data).
```python
def build_dataset(
texts,
labels=None,
batch_size=32,
cache=False,
augment=False,
repeat=False,
shuffle=1024,
):
AUTO = tf.data.AUTOTUNE # AUTOTUNE option
slices = (
(texts,)
if labels is None
else (texts, keras.utils.to_categorical(labels, num_classes=4))
) # Create slices
ds = tf.data.Dataset.from_tensor_slices(slices) # Create dataset from slices
ds = ds.cache() if cache else ds # Cache dataset if enabled
if augment: # Apply augmentation if enabled
ds = ds.map(augment_fn, num_parallel_calls=AUTO)
ds = ds.map(preprocess_fn, num_parallel_calls=AUTO) # Map preprocessing function
ds = ds.repeat() if repeat else ds # Repeat dataset if enabled
opt = tf.data.Options() # Create dataset options
if shuffle:
ds = ds.shuffle(shuffle, seed=CFG.seed) # Shuffle dataset if enabled
opt.experimental_deterministic = False
ds = ds.with_options(opt) # Set dataset options
ds = ds.batch(batch_size, drop_remainder=True) # Batch dataset
ds = ds.prefetch(AUTO) # Prefetch next batch
return ds # Return the built dataset
```
Now let's create the train and valid dataloaders using the above function.
```python
# Build train dataloader
train_texts = train_df.options.tolist() # Extract training texts
train_labels = train_df.label.tolist() # Extract training labels
train_ds = build_dataset(
train_texts,
train_labels,
batch_size=CFG.batch_size,
cache=True,
shuffle=True,
repeat=True,
augment=CFG.augment,
)
# Build valid dataloader
valid_texts = valid_df.options.tolist() # Extract validation texts
valid_labels = valid_df.label.tolist() # Extract validation labels
valid_ds = build_dataset(
valid_texts,
valid_labels,
batch_size=CFG.batch_size,
cache=True,
shuffle=False,
repeat=False,
augment=False,
)
```
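As an optional, illustrative check, we can peek at a single batch to confirm the shapes produced by the pipeline:
```python
# Expected shapes: token_ids / padding_mask -> (batch_size, 4, sequence_length),
# labels -> (batch_size, 4) one-hot vectors.
sample_inputs, sample_labels = next(iter(train_ds))
for name, tensor in sample_inputs.items():
    print(name, tensor.shape)
print("labels", sample_labels.shape)
```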
---
## LR Schedule
Implementing a learning rate scheduler is crucial for transfer learning. The learning
rate starts at `lr_start`, ramps up to `lr_max` during the first couple of epochs, and then
gradually tapers down to `lr_min` following a **cosine** curve.
**Importance:** A well-structured learning rate schedule is essential for efficient model
training, ensuring optimal convergence and avoiding issues such as overshooting or
stagnation.
```python
import math
def get_lr_callback(batch_size=8, mode="cos", epochs=10, plot=False):
lr_start, lr_max, lr_min = 1.0e-6, 0.6e-6 * batch_size, 1e-6
lr_ramp_ep, lr_sus_ep = 2, 0
def lrfn(epoch): # Learning rate update function
if epoch < lr_ramp_ep:
lr = (lr_max - lr_start) / lr_ramp_ep * epoch + lr_start
elif epoch < lr_ramp_ep + lr_sus_ep:
lr = lr_max
else:
decay_total_epochs, decay_epoch_index = (
epochs - lr_ramp_ep - lr_sus_ep + 3,
epoch - lr_ramp_ep - lr_sus_ep,
)
phase = math.pi * decay_epoch_index / decay_total_epochs
lr = (lr_max - lr_min) * 0.5 * (1 + math.cos(phase)) + lr_min
return lr
if plot: # Plot lr curve if plot is True
plt.figure(figsize=(10, 5))
plt.plot(
np.arange(epochs),
[lrfn(epoch) for epoch in np.arange(epochs)],
marker="o",
)
plt.xlabel("epoch")
plt.ylabel("lr")
plt.title("LR Scheduler")
plt.show()
return keras.callbacks.LearningRateScheduler(
lrfn, verbose=False
) # Create lr callback
_ = get_lr_callback(CFG.batch_size, plot=True)
```

---
## Callbacks
The function below will gather all the training callbacks, such as `lr_scheduler`,
`model_checkpoint`.
```python
def get_callbacks():
callbacks = []
lr_cb = get_lr_callback(CFG.batch_size) # Get lr callback
ckpt_cb = keras.callbacks.ModelCheckpoint(
f"best.keras",
monitor="val_accuracy",
save_best_only=True,
save_weights_only=False,
mode="max",
) # Get Model checkpoint callback
callbacks.extend([lr_cb, ckpt_cb]) # Add lr and checkpoint callbacks
return callbacks # Return the list of callbacks
callbacks = get_callbacks()
```
---
## MultipleChoice Model
### Pre-trained Models
The `KerasNLP` library provides comprehensive, ready-to-use implementations of popular
NLP model architectures. It features a variety of pre-trained models including `Bert`,
`Roberta`, `DebertaV3`, and more. In this notebook, we'll showcase the usage of
`DebertaV3`. However, feel free to explore all available models in the [KerasNLP
documentation](https://keras.io/api/keras_nlp/models/). Also for a deeper understanding
of `KerasNLP`, refer to the informative [getting started
guide](https://keras.io/guides/keras_nlp/getting_started/).
Our approach involves using `keras_nlp.models.XXClassifier` to process each question and
option pair (e.g. (Q+A), (Q+B), etc.), generating logits. These logits are then combined
and passed through a softmax function to produce the final output.
### Classifier for Multiple-Choice Tasks
When dealing with multiple-choice questions, instead of giving the model the question and
all options together `(Q + A + B + C ...)`, we provide the model with one option at a
time along with the question. For instance, `(Q + A)`, `(Q + B)`, and so on. Once we have
the prediction scores (logits) for all options, we combine them using the `Softmax`
function to get the ultimate result. If we had given all options at once to the model,
the text's length would increase, making it harder for the model to handle. The picture
below illustrates this idea:

<div align="center"><b> Picture Credict: </b> <a
href="https://twitter.com/johnowhitaker"> @johnowhitaker </a> </div></div><br>
From a coding perspective, remember that we use the same model for all five options, with
shared weights. Despite the figure suggesting five separate models, they are, in fact,
one model with shared weights. Another point to consider is the the input shapes of
Classifier and MultipleChoice.
* Input shape for **Multiple Choice**: $(batch\_size, num\_choices, seq\_length)$
* Input shape for **Classifier**: $(batch\_size, seq\_length)$
Certainly, it's clear that we can't directly give the data for the multiple-choice task
to the model because the input shapes don't match. To handle this, we'll use **slicing**.
This means we'll separate the features of each option, like $feature_{(Q + A)}$ and
$feature_{(Q + B)}$, and give them one by one to the NLP classifier. After we get the
prediction scores $logits_{(Q + A)}$ and $logits_{(Q + B)}$ for all the options, we'll
use the Softmax function, like $\operatorname{Softmax}([logits_{(Q + A)}, logits_{(Q +
B)}])$, to combine them. This final step helps us make the ultimate decision or choice.
> Note that in the classifier, we set `num_classes=1` instead of `4`. This is because the
classifier produces a single output for each option. When dealing with four options,
these individual outputs are joined together and then processed through a softmax
function to generate the final result, which has a dimension of `4`.
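To make that combination step concrete, here is a tiny numeric sketch (the logit values are made up purely for illustration) of how four per-option scores become a probability distribution over the options:
```python
import numpy as np
# Hypothetical per-option logits for (Q+A), (Q+B), (Q+C), (Q+D).
logits = np.array([1.2, -0.3, 0.5, 2.0])
probs = np.exp(logits) / np.exp(logits).sum()  # softmax over the four options
print(probs.round(3), "-> predicted option:", probs.argmax())
```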
```python
# Selects one option out of the four candidates
class SelectOption(keras.layers.Layer):
def __init__(self, index, **kwargs):
super().__init__(**kwargs)
self.index = index
def call(self, inputs):
# Selects a specific slice from the inputs tensor
return inputs[:, self.index, :]
def get_config(self):
# For serialize the model
base_config = super().get_config()
config = {
"index": self.index,
}
return {**base_config, **config}
def build_model():
# Define input layers
inputs = {
"token_ids": keras.Input(shape=(4, None), dtype="int32", name="token_ids"),
"padding_mask": keras.Input(
shape=(4, None), dtype="int32", name="padding_mask"
),
}
# Create a DebertaV3Classifier model
classifier = keras_nlp.models.DebertaV3Classifier.from_preset(
CFG.preset,
preprocessor=None,
        num_classes=1,  # one output per option; the four per-option outputs are combined later
)
logits = []
    # Loop through each option (Q+A), (Q+B), etc. and compute the associated logits
for option_idx in range(4):
option = {
k: SelectOption(option_idx, name=f"{k}_{option_idx}")(v)
for k, v in inputs.items()
}
logit = classifier(option)
logits.append(logit)
# Compute final output
logits = keras.layers.Concatenate(axis=-1)(logits)
outputs = keras.layers.Softmax(axis=-1)(logits)
model = keras.Model(inputs, outputs)
# Compile the model with optimizer, loss, and metrics
model.compile(
optimizer=keras.optimizers.AdamW(5e-6),
loss=keras.losses.CategoricalCrossentropy(label_smoothing=0.02),
metrics=[
keras.metrics.CategoricalAccuracy(name="accuracy"),
],
jit_compile=True,
)
return model
# Build the model
model = build_model()
```
Let's check out the model summary to get a better insight into the model.
```python
model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩
│ padding_mask │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ token_ids │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ padding_mask_0 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ padding_mask[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">SelectOption</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ token_ids_0 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ token_ids[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">SelectOption</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ padding_mask_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ padding_mask[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">SelectOption</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ token_ids_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ token_ids[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">SelectOption</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ padding_mask_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ padding_mask[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">SelectOption</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ token_ids_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ token_ids[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">SelectOption</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ padding_mask_3 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ padding_mask[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">SelectOption</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ token_ids_3 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ token_ids[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">SelectOption</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ deberta_v3_classif… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">70,830…</span> │ padding_mask_0[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">DebertaV3Classifi…</span> │ │ │ token_ids_0[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ │ │ │ padding_mask_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>… │
│ │ │ │ token_ids_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ │ │ │ padding_mask_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>… │
│ │ │ │ token_ids_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ │ │ │ padding_mask_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>… │
│ │ │ │ token_ids_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ concatenate │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ deberta_v3_classifi… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Concatenate</span>) │ │ │ deberta_v3_classifi… │
│ │ │ │ deberta_v3_classifi… │
│ │ │ │ deberta_v3_classifi… │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ softmax (<span style="color: #0087ff; text-decoration-color: #0087ff">Softmax</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ concatenate[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
└─────────────────────┴───────────────────┴─────────┴──────────────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">70,830,337</span> (270.20 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">70,830,337</span> (270.20 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
Finally, let's visually check the model structure to confirm everything is in place.
```python
keras.utils.plot_model(model, show_shapes=True)
```

---
## Training
```python
# Start training the model
history = model.fit(
train_ds,
epochs=CFG.epochs,
validation_data=valid_ds,
callbacks=callbacks,
steps_per_epoch=int(len(train_df) / CFG.batch_size),
verbose=1,
)
```
<div class="k-default-codeblock">
```
Epoch 1/5
183/183 ━━━━━━━━━━━━━━━━━━━━ 5087s 25s/step - accuracy: 0.2563 - loss: 1.3884 - val_accuracy: 0.5150 - val_loss: 1.3742 - learning_rate: 1.0000e-06
Epoch 2/5
183/183 ━━━━━━━━━━━━━━━━━━━━ 4529s 25s/step - accuracy: 0.3825 - loss: 1.3364 - val_accuracy: 0.7125 - val_loss: 0.9071 - learning_rate: 2.9000e-06
Epoch 3/5
183/183 ━━━━━━━━━━━━━━━━━━━━ 4524s 25s/step - accuracy: 0.6144 - loss: 1.0118 - val_accuracy: 0.7425 - val_loss: 0.8017 - learning_rate: 4.8000e-06
Epoch 4/5
183/183 ━━━━━━━━━━━━━━━━━━━━ 4522s 25s/step - accuracy: 0.6744 - loss: 0.8460 - val_accuracy: 0.7625 - val_loss: 0.7323 - learning_rate: 4.7230e-06
Epoch 5/5
183/183 ━━━━━━━━━━━━━━━━━━━━ 4517s 25s/step - accuracy: 0.7200 - loss: 0.7458 - val_accuracy: 0.7750 - val_loss: 0.7022 - learning_rate: 4.4984e-06
```
</div>
---
## Inference
```python
# Make predictions using the trained model on last validation data
predictions = model.predict(
valid_ds,
batch_size=CFG.batch_size, # max batch size = valid size
verbose=1,
)
# Format predictions and true answers
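# Rank the four options from most to least likely for each sample and keep the top
# one (equivalent to `np.argmax(predictions, axis=-1)`).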
pred_answers = np.arange(4)[np.argsort(-predictions)][:, 0]
true_answers = valid_df.label.values
# Check 5 Predictions
print("# Predictions\n")
for i in range(0, 50, 10):
row = valid_df.iloc[i]
question = row.startphrase
pred_answer = f"ending{pred_answers[i]}"
true_answer = f"ending{true_answers[i]}"
print(f"❓ Sentence {i+1}:\n{question}\n")
print(f"✅ True Ending: {true_answer}\n >> {row[true_answer]}\n")
print(f"🤖 Predicted Ending: {pred_answer}\n >> {row[pred_answer]}\n")
print("-" * 90, "\n")
```
<div class="k-default-codeblock">
```
50/50 ━━━━━━━━━━━━━━━━━━━━ 274s 5s/step
# Predictions
```
</div>
<div class="k-default-codeblock">
```
❓ Sentence 1:
The man shows the teens how to move the oars. The teens
```
</div>
<div class="k-default-codeblock">
```
✅ True Ending: ending3
>> follow the instructions of the man and row the oars.
```
</div>
<div class="k-default-codeblock">
```
🤖 Predicted Ending: ending3
>> follow the instructions of the man and row the oars.
```
</div>
<div class="k-default-codeblock">
```
------------------------------------------------------------------------------------------
```
</div>
<div class="k-default-codeblock">
```
❓ Sentence 11:
A lake reflects the mountains and the sky. Someone
```
</div>
<div class="k-default-codeblock">
```
✅ True Ending: ending2
>> runs along a desert highway.
```
</div>
<div class="k-default-codeblock">
```
🤖 Predicted Ending: ending1
>> remains by the door.
```
</div>
<div class="k-default-codeblock">
```
------------------------------------------------------------------------------------------
```
</div>
<div class="k-default-codeblock">
```
❓ Sentence 21:
On screen, she smiles as someone holds up a present. He watches somberly as on screen, his mother
```
</div>
<div class="k-default-codeblock">
```
✅ True Ending: ending1
>> picks him up and plays with him in the garden.
```
</div>
<div class="k-default-codeblock">
```
🤖 Predicted Ending: ending0
>> comes out of her apartment, glowers at her laptop.
```
</div>
<div class="k-default-codeblock">
```
------------------------------------------------------------------------------------------
```
</div>
<div class="k-default-codeblock">
```
❓ Sentence 31:
A woman in a black shirt is sitting on a bench. A man
```
</div>
<div class="k-default-codeblock">
```
✅ True Ending: ending2
>> sits behind a desk.
```
</div>
<div class="k-default-codeblock">
```
🤖 Predicted Ending: ending0
>> is dancing on a stage.
```
</div>
<div class="k-default-codeblock">
```
------------------------------------------------------------------------------------------
```
</div>
<div class="k-default-codeblock">
```
❓ Sentence 41:
People are standing on sand wearing red shirts. They
```
</div>
<div class="k-default-codeblock">
```
✅ True Ending: ending3
>> are playing a game of soccer in the sand.
```
</div>
<div class="k-default-codeblock">
```
🤖 Predicted Ending: ending3
>> are playing a game of soccer in the sand.
```
</div>
<div class="k-default-codeblock">
```
------------------------------------------------------------------------------------------
```
</div>
---
## Reference
* [Multiple Choice with
HF](https://twitter.com/johnowhitaker/status/1689790373454041089?s=20)
* [Keras NLP](https://keras.io/api/keras_nlp/)
* [BirdCLEF23: Pretraining is All you Need
[Train]](https://www.kaggle.com/code/awsaf49/birdclef23-pretraining-is-all-you-need-train)
* [Triple Stratified KFold with
TFRecords](https://www.kaggle.com/code/cdeotte/triple-stratified-kfold-with-tfrecords)
| keras-io/examples/nlp/md/multiple_choice_task_with_transfer_learning.md/0 | {
"file_path": "keras-io/examples/nlp/md/multiple_choice_task_with_transfer_learning.md",
"repo_id": "keras-io",
"token_count": 14028
} | 102 |
# Text classification using Decision Forests and pretrained embeddings
**Author:** Gitesh Chawda<br>
**Date created:** 09/05/2022<br>
**Last modified:** 09/05/2022<br>
**Description:** Using TensorFlow Decision Forests for text classification.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/tweet-classification-using-tfdf.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/tweet-classification-using-tfdf.py)
---
## Introduction
[TensorFlow Decision Forests](https://www.tensorflow.org/decision_forests) (TF-DF)
is a collection of state-of-the-art algorithms for Decision Forest models that are
compatible with Keras APIs. The module includes Random Forests, Gradient Boosted Trees,
and CART, and can be used for regression, classification, and ranking tasks.
In this example we will use Gradient Boosted Trees with pretrained embeddings to
classify disaster-related tweets.
### See also:
- [TF-DF beginner tutorial](https://www.tensorflow.org/decision_forests/tutorials/beginner_colab)
- [TF-DF intermediate tutorial](https://www.tensorflow.org/decision_forests/tutorials/intermediate_colab).
Install TensorFlow Decision Forests using the following command:
`pip install tensorflow_decision_forests`
---
## Imports
```python
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
from tensorflow.keras import layers
import tensorflow_decision_forests as tfdf
import matplotlib.pyplot as plt
```
---
## Get the data
The dataset is available on [Kaggle](https://www.kaggle.com/c/nlp-getting-started).
Dataset description:
**Files:**
- train.csv: the training set
**Columns:**
- id: a unique identifier for each tweet
- text: the text of the tweet
- location: the location the tweet was sent from (may be blank)
- keyword: a particular keyword from the tweet (may be blank)
- target: in train.csv only, this denotes whether a tweet is about a real disaster (1) or not (0)
```python
# Turn .csv files into pandas DataFrame's
df = pd.read_csv(
"https://raw.githubusercontent.com/IMvision12/Tweets-Classification-NLP/main/train.csv"
)
print(df.head())
```
<div class="k-default-codeblock">
```
id keyword location text \
0 1 NaN NaN Our Deeds are the Reason of this #earthquake M...
1 4 NaN NaN Forest fire near La Ronge Sask. Canada
2 5 NaN NaN All residents asked to 'shelter in place' are ...
3 6 NaN NaN 13,000 people receive #wildfires evacuation or...
4 7 NaN NaN Just got sent this photo from Ruby #Alaska as ...
```
</div>
<div class="k-default-codeblock">
```
target
0 1
1 1
2 1
3 1
4 1
```
</div>
The dataset includes 7613 samples with 5 columns:
```python
print(f"Training dataset shape: {df.shape}")
```
<div class="k-default-codeblock">
```
Training dataset shape: (7613, 5)
```
</div>
Shuffling and dropping unnecessary columns:
```python
df_shuffled = df.sample(frac=1, random_state=42)
# Dropping id, keyword and location columns as these columns consist of mostly NaN values
# we will be using only text and target columns
df_shuffled.drop(["id", "keyword", "location"], axis=1, inplace=True)
df_shuffled.reset_index(inplace=True, drop=True)
print(df_shuffled.head())
```
<div class="k-default-codeblock">
```
text target
0 So you have a new weapon that can cause un-ima... 1
1 The f$&@ing things I do for #GISHWHES Just... 0
2 DT @georgegalloway: RT @Galloway4Mayor: ÛÏThe... 1
3 Aftershock back to school kick off was great. ... 0
4 in response to trauma Children of Addicts deve... 0
```
</div>
Printing information about the shuffled dataframe:
```python
print(df_shuffled.info())
```
<div class="k-default-codeblock">
```
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 7613 entries, 0 to 7612
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 text 7613 non-null object
1 target 7613 non-null int64
dtypes: int64(1), object(1)
memory usage: 119.1+ KB
None
```
</div>
Total number of "disaster" and "non-disaster" tweets:
```python
print(
"Total Number of disaster and non-disaster tweets: "
f"{df_shuffled.target.value_counts()}"
)
```
<div class="k-default-codeblock">
```
Total Number of disaster and non-disaster tweets: 0 4342
1 3271
Name: target, dtype: int64
```
</div>
Let's preview a few samples:
```python
for index, example in df_shuffled[:5].iterrows():
print(f"Example #{index}")
print(f"\tTarget : {example['target']}")
print(f"\tText : {example['text']}")
```
<div class="k-default-codeblock">
```
Example #0
Target : 1
Text : So you have a new weapon that can cause un-imaginable destruction.
Example #1
Target : 0
Text : The f$&@ing things I do for #GISHWHES Just got soaked in a deluge going for pads and tampons. Thx @mishacollins @/@
Example #2
Target : 1
Text : DT @georgegalloway: RT @Galloway4Mayor: ÛÏThe CoL police can catch a pickpocket in Liverpool Stree... http://t.co/vXIn1gOq4Q
Example #3
Target : 0
Text : Aftershock back to school kick off was great. I want to thank everyone for making it possible. What a great night.
Example #4
Target : 0
Text : in response to trauma Children of Addicts develop a defensive self - one that decreases vulnerability. (3
```
</div>
Splitting dataset into training and test sets:
```python
test_df = df_shuffled.sample(frac=0.1, random_state=42)
train_df = df_shuffled.drop(test_df.index)
print(f"Using {len(train_df)} samples for training and {len(test_df)} for validation")
```
<div class="k-default-codeblock">
```
Using 6852 samples for training and 761 for validation
```
</div>
Total number of "disaster" and "non-disaster" tweets in the training data:
```python
print(train_df["target"].value_counts())
```
<div class="k-default-codeblock">
```
0 3929
1 2923
Name: target, dtype: int64
```
</div>
Total number of "disaster" and "non-disaster" tweets in the test data:
```python
print(test_df["target"].value_counts())
```
<div class="k-default-codeblock">
```
0 413
1 348
Name: target, dtype: int64
```
</div>
---
## Convert data to a `tf.data.Dataset`
```python
def create_dataset(dataframe):
dataset = tf.data.Dataset.from_tensor_slices(
(dataframe["text"].to_numpy(), dataframe["target"].to_numpy())
)
dataset = dataset.batch(100)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
train_ds = create_dataset(train_df)
test_ds = create_dataset(test_df)
```
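As an optional sanity check, we can look at the shape of one batch; each batch holds up to 100 raw tweet strings and their integer targets:
```python
for texts, targets in train_ds.take(1):
    print(texts.shape, targets.shape)  # (100,) (100,)
```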
---
## Downloading pretrained embeddings
The Universal Sentence Encoder embeddings encode text into high-dimensional vectors that can be
used for text classification, semantic similarity, clustering and other natural language
tasks. They're trained on a variety of data sources and a variety of tasks. Their input is
variable-length English text and their output is a 512-dimensional vector.
To learn more about these pretrained embeddings, see
[Universal Sentence Encoder](https://tfhub.dev/google/universal-sentence-encoder/4).
```python
sentence_encoder_layer = hub.KerasLayer(
"https://tfhub.dev/google/universal-sentence-encoder/4"
)
```
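As a quick, optional check (the two example sentences are arbitrary), the layer maps a batch of raw strings to fixed-size 512-dimensional vectors:
```python
sample_embeddings = sentence_encoder_layer(
    tf.constant(["Forest fire near La Ronge Sask. Canada", "I love my dog"])
)
print(sample_embeddings.shape)  # (2, 512)
```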
---
## Creating our models
We create two models. In the first model (model_1) raw text will be first encoded via
pretrained embeddings and then passed to a Gradient Boosted Tree model for
classification. In the second model (model_2) raw text will be directly passed to
the Gradient Boosted Trees model.
Building model_1
```python
inputs = layers.Input(shape=(), dtype=tf.string)
outputs = sentence_encoder_layer(inputs)
preprocessor = keras.Model(inputs=inputs, outputs=outputs)
model_1 = tfdf.keras.GradientBoostedTreesModel(preprocessing=preprocessor)
```
<div class="k-default-codeblock">
```
Use /tmp/tmpsp7fmsyk as temporary training directory
```
</div>
Building model_2
```python
model_2 = tfdf.keras.GradientBoostedTreesModel()
```
<div class="k-default-codeblock">
```
Use /tmp/tmpl0zj3vw0 as temporary training directory
```
</div>
---
## Train the models
We compile our model by passing the metrics `Accuracy`, `Recall`, `Precision` and
`AUC`. When it comes to the loss, TF-DF automatically detects the best loss for the task
(Classification or regression). It is printed in the model summary.
Also, because they're batch-training models rather than mini-batch gradient descent models,
TF-DF models do not need a validation dataset to monitor overfitting, or to stop
training early. Some algorithms do not use a validation dataset (e.g. Random Forest)
while some others do (e.g. Gradient Boosted Trees). If a validation dataset is
needed, it will be extracted automatically from the training dataset.
```python
# Compiling model_1
model_1.compile(metrics=["Accuracy", "Recall", "Precision", "AUC"])
# Here we do not specify epochs as, TF-DF trains exactly one epoch of the dataset
model_1.fit(train_ds)
# Compiling model_2
model_2.compile(metrics=["Accuracy", "Recall", "Precision", "AUC"])
# Here we do not specify epochs as, TF-DF trains exactly one epoch of the dataset
model_2.fit(train_ds)
```
<div class="k-default-codeblock">
```
Reading training dataset...
Training dataset read in 0:00:06.473683. Found 6852 examples.
Training model...
Model trained in 0:00:41.461477
Compiling model...
Model compiled.
Reading training dataset...
Training dataset read in 0:00:00.087930. Found 6852 examples.
Training model...
Model trained in 0:00:00.367492
Compiling model...
Model compiled.
<keras.callbacks.History at 0x7fe09ded1b40>
```
</div>
Prints training logs of model_1
```python
logs_1 = model_1.make_inspector().training_logs()
print(logs_1)
```
<div class="k-default-codeblock">
```
```
</div>
Prints training logs of model_2
```python
logs_2 = model_2.make_inspector().training_logs()
print(logs_2)
```
<div class="k-default-codeblock">
```
```
</div>
The model.summary() method prints a variety of information about your decision tree model, including model type, task, input features, and feature importance.
```python
print("model_1 summary: ")
print(model_1.summary())
print()
print("model_2 summary: ")
print(model_2.summary())
```
<div class="k-default-codeblock">
```
model_1 summary:
Model: "gradient_boosted_trees_model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
model (Functional) (None, 512) 256797824
=================================================================
Total params: 256,797,825
Trainable params: 0
Non-trainable params: 256,797,825
_________________________________________________________________
Type: "GRADIENT_BOOSTED_TREES"
Task: CLASSIFICATION
Label: "__LABEL"
```
</div>
<div class="k-default-codeblock">
```
No weights
```
</div>
<div class="k-default-codeblock">
```
Loss: BINOMIAL_LOG_LIKELIHOOD
Validation loss value: 0.806777
Number of trees per iteration: 1
Node format: NOT_SET
Number of trees: 137
Total number of nodes: 6671
```
</div>
<div class="k-default-codeblock">
```
Number of nodes by tree:
Count: 137 Average: 48.6934 StdDev: 9.91023
Min: 21 Max: 63 Ignored: 0
----------------------------------------------
[ 21, 23) 1 0.73% 0.73%
[ 23, 25) 1 0.73% 1.46%
[ 25, 27) 0 0.00% 1.46%
[ 27, 29) 1 0.73% 2.19%
[ 29, 31) 3 2.19% 4.38% #
[ 31, 33) 3 2.19% 6.57% #
[ 33, 36) 9 6.57% 13.14% ####
[ 36, 38) 4 2.92% 16.06% ##
[ 38, 40) 4 2.92% 18.98% ##
[ 40, 42) 8 5.84% 24.82% ####
[ 42, 44) 8 5.84% 30.66% ####
[ 44, 46) 9 6.57% 37.23% ####
[ 46, 48) 7 5.11% 42.34% ###
[ 48, 51) 10 7.30% 49.64% #####
[ 51, 53) 13 9.49% 59.12% ######
[ 53, 55) 10 7.30% 66.42% #####
[ 55, 57) 10 7.30% 73.72% #####
[ 57, 59) 6 4.38% 78.10% ###
[ 59, 61) 8 5.84% 83.94% ####
[ 61, 63] 22 16.06% 100.00% ##########
```
</div>
<div class="k-default-codeblock">
```
Depth by leafs:
Count: 3404 Average: 4.81052 StdDev: 0.557183
Min: 1 Max: 5 Ignored: 0
----------------------------------------------
[ 1, 2) 6 0.18% 0.18%
[ 2, 3) 38 1.12% 1.29%
[ 3, 4) 117 3.44% 4.73%
[ 4, 5) 273 8.02% 12.75% #
[ 5, 5] 2970 87.25% 100.00% ##########
```
</div>
<div class="k-default-codeblock">
```
Number of training obs by leaf:
Count: 3404 Average: 248.806 StdDev: 517.403
Min: 5 Max: 4709 Ignored: 0
----------------------------------------------
[ 5, 240) 2615 76.82% 76.82% ##########
[ 240, 475) 243 7.14% 83.96% #
[ 475, 710) 162 4.76% 88.72% #
[ 710, 946) 104 3.06% 91.77%
[ 946, 1181) 80 2.35% 94.12%
[ 1181, 1416) 48 1.41% 95.53%
[ 1416, 1651) 44 1.29% 96.83%
[ 1651, 1887) 27 0.79% 97.62%
[ 1887, 2122) 18 0.53% 98.15%
[ 2122, 2357) 19 0.56% 98.71%
[ 2357, 2592) 10 0.29% 99.00%
[ 2592, 2828) 6 0.18% 99.18%
[ 2828, 3063) 8 0.24% 99.41%
[ 3063, 3298) 7 0.21% 99.62%
[ 3298, 3533) 3 0.09% 99.71%
[ 3533, 3769) 5 0.15% 99.85%
[ 3769, 4004) 2 0.06% 99.91%
[ 4004, 4239) 1 0.03% 99.94%
[ 4239, 4474) 1 0.03% 99.97%
[ 4474, 4709] 1 0.03% 100.00%
```
</div>
<div class="k-default-codeblock">
```
Condition type in nodes:
3267 : HigherCondition
Condition type in nodes with depth <= 0:
137 : HigherCondition
Condition type in nodes with depth <= 1:
405 : HigherCondition
Condition type in nodes with depth <= 2:
903 : HigherCondition
Condition type in nodes with depth <= 3:
1782 : HigherCondition
Condition type in nodes with depth <= 5:
3267 : HigherCondition
```
</div>
<div class="k-default-codeblock">
```
None
```
</div>
<div class="k-default-codeblock">
```
model_2 summary:
Model: "gradient_boosted_trees_model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
=================================================================
Total params: 1
Trainable params: 0
Non-trainable params: 1
_________________________________________________________________
Type: "GRADIENT_BOOSTED_TREES"
Task: CLASSIFICATION
Label: "__LABEL"
```
</div>
<div class="k-default-codeblock">
```
Input Features (1):
data:0
```
</div>
<div class="k-default-codeblock">
```
No weights
```
</div>
<div class="k-default-codeblock">
```
Variable Importance: MEAN_MIN_DEPTH:
1. "__LABEL" 2.250000 ################
2. "data:0" 0.000000
```
</div>
<div class="k-default-codeblock">
```
Variable Importance: NUM_AS_ROOT:
1. "data:0" 117.000000
```
</div>
<div class="k-default-codeblock">
```
Variable Importance: NUM_NODES:
1. "data:0" 351.000000
```
</div>
<div class="k-default-codeblock">
```
Variable Importance: SUM_SCORE:
1. "data:0" 32.035971
```
</div>
<div class="k-default-codeblock">
```
Loss: BINOMIAL_LOG_LIKELIHOOD
Validation loss value: 1.36429
Number of trees per iteration: 1
Node format: NOT_SET
Number of trees: 117
Total number of nodes: 819
```
</div>
<div class="k-default-codeblock">
```
Number of nodes by tree:
Count: 117 Average: 7 StdDev: 0
Min: 7 Max: 7 Ignored: 0
----------------------------------------------
[ 7, 7] 117 100.00% 100.00% ##########
```
</div>
<div class="k-default-codeblock">
```
Depth by leafs:
Count: 468 Average: 2.25 StdDev: 0.829156
Min: 1 Max: 3 Ignored: 0
----------------------------------------------
[ 1, 2) 117 25.00% 25.00% #####
[ 2, 3) 117 25.00% 50.00% #####
[ 3, 3] 234 50.00% 100.00% ##########
```
</div>
<div class="k-default-codeblock">
```
Number of training obs by leaf:
Count: 468 Average: 1545.5 StdDev: 2660.15
Min: 5 Max: 6153 Ignored: 0
----------------------------------------------
[ 5, 312) 351 75.00% 75.00% ##########
[ 312, 619) 0 0.00% 75.00%
[ 619, 927) 0 0.00% 75.00%
[ 927, 1234) 0 0.00% 75.00%
[ 1234, 1542) 0 0.00% 75.00%
[ 1542, 1849) 0 0.00% 75.00%
[ 1849, 2157) 0 0.00% 75.00%
[ 2157, 2464) 0 0.00% 75.00%
[ 2464, 2772) 0 0.00% 75.00%
[ 2772, 3079) 0 0.00% 75.00%
[ 3079, 3386) 0 0.00% 75.00%
[ 3386, 3694) 0 0.00% 75.00%
[ 3694, 4001) 0 0.00% 75.00%
[ 4001, 4309) 0 0.00% 75.00%
[ 4309, 4616) 0 0.00% 75.00%
[ 4616, 4924) 0 0.00% 75.00%
[ 4924, 5231) 0 0.00% 75.00%
[ 5231, 5539) 0 0.00% 75.00%
[ 5539, 5846) 0 0.00% 75.00%
[ 5846, 6153] 117 25.00% 100.00% ###
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes:
351 : data:0 [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes with depth <= 0:
117 : data:0 [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes with depth <= 1:
234 : data:0 [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes with depth <= 2:
351 : data:0 [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes with depth <= 3:
351 : data:0 [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes with depth <= 5:
351 : data:0 [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Condition type in nodes:
351 : ContainsBitmapCondition
Condition type in nodes with depth <= 0:
117 : ContainsBitmapCondition
Condition type in nodes with depth <= 1:
234 : ContainsBitmapCondition
Condition type in nodes with depth <= 2:
351 : ContainsBitmapCondition
Condition type in nodes with depth <= 3:
351 : ContainsBitmapCondition
Condition type in nodes with depth <= 5:
351 : ContainsBitmapCondition
```
</div>
<div class="k-default-codeblock">
```
None
```
</div>
---
## Plotting training metrics
```python
def plot_curve(logs):
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot([log.num_trees for log in logs], [log.evaluation.accuracy for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Accuracy")
plt.subplot(1, 2, 2)
plt.plot([log.num_trees for log in logs], [log.evaluation.loss for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Loss")
plt.show()
plot_curve(logs_1)
plot_curve(logs_2)
```


---
## Evaluating on test data
```python
results = model_1.evaluate(test_ds, return_dict=True, verbose=0)
print("model_1 Evaluation: \n")
for name, value in results.items():
print(f"{name}: {value:.4f}")
results = model_2.evaluate(test_ds, return_dict=True, verbose=0)
print("model_2 Evaluation: \n")
for name, value in results.items():
print(f"{name}: {value:.4f}")
```
<div class="k-default-codeblock">
```
model_1 Evaluation:
```
</div>
<div class="k-default-codeblock">
```
loss: 0.0000
Accuracy: 0.8160
recall: 0.7241
precision: 0.8514
auc: 0.8700
model_2 Evaluation:
```
</div>
<div class="k-default-codeblock">
```
loss: 0.0000
Accuracy: 0.5440
recall: 0.0029
precision: 1.0000
auc: 0.5026
```
</div>
---
## Predicting on test data
```python
test_df.reset_index(inplace=True, drop=True)
for index, row in test_df.iterrows():
text = tf.expand_dims(row["text"], axis=0)
preds = model_1.predict_step(text)
preds = tf.squeeze(tf.round(preds))
print(f"Text: {row['text']}")
print(f"Prediction: {int(preds)}")
print(f"Ground Truth : {row['target']}")
if index == 10:
break
```
<div class="k-default-codeblock">
```
Text: DFR EP016 Monthly Meltdown - On Dnbheaven 2015.08.06 http://t.co/EjKRf8N8A8 #Drum and Bass #heavy #nasty http://t.co/SPHWE6wFI5
Prediction: 0
Ground Truth : 0
Text: FedEx no longer to transport bioterror germs in wake of anthrax lab mishaps http://t.co/qZQc8WWwcN via @usatoday
Prediction: 1
Ground Truth : 0
Text: Gunmen kill four in El Salvador bus attack: Suspected Salvadoran gang members killed four people and wounded s... http://t.co/CNtwB6ScZj
Prediction: 1
Ground Truth : 1
Text: @camilacabello97 Internally and externally screaming
Prediction: 0
Ground Truth : 1
Text: Radiation emergency #preparedness starts with knowing to: get inside stay inside and stay tuned http://t.co/RFFPqBAz2F via @CDCgov
Prediction: 1
Ground Truth : 1
Text: Investigators rule catastrophic structural failure resulted in 2014 Virg.. Related Articles: http://t.co/Cy1LFeNyV8
Prediction: 1
Ground Truth : 1
Text: How the West was burned: Thousands of wildfires ablaze in #California alone http://t.co/iCSjGZ9tE1 #climate #energy http://t.co/9FxmN0l0Bd
Prediction: 1
Ground Truth : 1
Text: Map: Typhoon Soudelor's predicted path as it approaches Taiwan; expected to make landfall over southern China by SÛ_ http://t.co/JDVSGVhlIs
Prediction: 1
Ground Truth : 1
Text: Ûª93 blasts accused Yeda Yakub dies in Karachi of heart attack http://t.co/mfKqyxd8XG #Mumbai
Prediction: 1
Ground Truth : 1
Text: My ears are bleeding https://t.co/k5KnNwugwT
Prediction: 0
Ground Truth : 0
Text: @RedCoatJackpot *As it was typical for them their bullets collided and none managed to reach their targets; such was the ''curse'' of a --
Prediction: 0
Ground Truth : 0
```
</div>
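Note that looping over single rows with `predict_step` is convenient for inspecting a few
examples, but predictions can also be computed for the whole dataset in one call. A minimal
sketch, reusing the `test_ds` created earlier (the output shape assumes a binary
classification model that returns one probability per example):
```python
probs = model_1.predict(test_ds)  # probabilities of the positive class
pred_labels = tf.cast(tf.round(tf.squeeze(probs)), tf.int32)  # rounded to 0/1 labels
```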
---
## Concluding remarks
The TensorFlow Decision Forests package provides powerful models
that work especially well with structured data. In our experiments,
the Gradient Boosted Tree model with pretrained embeddings achieved 81.6%
test accuracy while the plain Gradient Boosted Tree model had 54.4% accuracy.
# Classification with TensorFlow Decision Forests
**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)<br>
**Date created:** 2022/01/25<br>
**Last modified:** 2022/01/25<br>
**Description:** Using TensorFlow Decision Forests for structured data classification.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/structured_data/ipynb/classification_with_tfdf.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/structured_data/classification_with_tfdf.py)
---
## Introduction
[TensorFlow Decision Forests](https://www.tensorflow.org/decision_forests)
is a collection of state-of-the-art algorithms of Decision Forest models
that are compatible with Keras APIs.
The models include [Random Forests](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/RandomForestModel),
[Gradient Boosted Trees](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel),
and [CART](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/CartModel),
and can be used for regression, classification, and ranking tasks.
For a beginner's guide to TensorFlow Decision Forests,
please refer to this [tutorial](https://www.tensorflow.org/decision_forests/tutorials/beginner_colab).
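As a quick preview of the API, all of these model families are instantiated through
Keras-style constructors. This is a hypothetical sketch that assumes the package is
installed and imported as `tfdf`, as done in the Setup section below:
```python
rf_model = tfdf.keras.RandomForestModel(task=tfdf.keras.Task.CLASSIFICATION)
gbt_model = tfdf.keras.GradientBoostedTreesModel(task=tfdf.keras.Task.CLASSIFICATION)
cart_model = tfdf.keras.CartModel(task=tfdf.keras.Task.CLASSIFICATION)
```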
This example uses the Gradient Boosted Trees model for binary classification of
structured data, and covers the following scenarios:
1. Build a decision forests model by specifying the input feature usage.
2. Implement a custom *Binary Target encoder* as a [Keras Preprocessing layer](https://keras.io/api/layers/preprocessing_layers/)
to encode the categorical features with respect to their target value co-occurrences,
and then use the encoded features to build a decision forests model.
3. Encode the categorical features as [embeddings](https://keras.io/api/layers/core_layers/embedding),
train these embeddings in a simple NN model, and then use the
trained embeddings as inputs to build a decision forests model.
This example uses TensorFlow 2.7 or higher,
as well as [TensorFlow Decision Forests](https://www.tensorflow.org/decision_forests),
which you can install using the following command:
```python
pip install -U tensorflow_decision_forests
```
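After installing, you can optionally verify that your TensorFlow version meets the
requirement above (a small, hypothetical check):
```python
import tensorflow as tf

major, minor = (int(v) for v in tf.__version__.split(".")[:2])
assert (major, minor) >= (2, 7), f"TensorFlow 2.7+ is required, found {tf.__version__}"
```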
---
## Setup
```python
import math
import urllib
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_decision_forests as tfdf
```
---
## Prepare the data
This example uses the
[United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29)
provided by the [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).
The task is binary classification to determine whether a person makes over 50K a year.
The dataset includes ~300K instances with 41 input features: 7 numerical features
and 34 categorical features.
First we load the data from the UCI Machine Learning Repository into a Pandas DataFrame.
```python
BASE_PATH = "https://kdd.ics.uci.edu/databases/census-income/census-income"
CSV_HEADER = [
l.decode("utf-8").split(":")[0].replace(" ", "_")
for l in urllib.request.urlopen(f"{BASE_PATH}.names")
if not l.startswith(b"|")
][2:]
CSV_HEADER.append("income_level")
train_data = pd.read_csv(f"{BASE_PATH}.data.gz", header=None, names=CSV_HEADER)
test_data = pd.read_csv(f"{BASE_PATH}.test.gz", header=None, names=CSV_HEADER)
```
---
## Define dataset metadata
Here, we define the metadata of the dataset that will be useful for encoding
the input features with respect to their types.
```python
# Target column name.
TARGET_COLUMN_NAME = "income_level"
# The labels of the target columns.
TARGET_LABELS = [" - 50000.", " 50000+."]
# Weight column name.
WEIGHT_COLUMN_NAME = "instance_weight"
# Numeric feature names.
NUMERIC_FEATURE_NAMES = [
"age",
"wage_per_hour",
"capital_gains",
"capital_losses",
"dividends_from_stocks",
"num_persons_worked_for_employer",
"weeks_worked_in_year",
]
# Categorical feature names.
CATEGORICAL_FEATURE_NAMES = [
"class_of_worker",
"detailed_industry_recode",
"detailed_occupation_recode",
"education",
"enroll_in_edu_inst_last_wk",
"marital_stat",
"major_industry_code",
"major_occupation_code",
"race",
"hispanic_origin",
"sex",
"member_of_a_labor_union",
"reason_for_unemployment",
"full_or_part_time_employment_stat",
"tax_filer_stat",
"region_of_previous_residence",
"state_of_previous_residence",
"detailed_household_and_family_stat",
"detailed_household_summary_in_household",
"migration_code-change_in_msa",
"migration_code-change_in_reg",
"migration_code-move_within_reg",
"live_in_this_house_1_year_ago",
"migration_prev_res_in_sunbelt",
"family_members_under_18",
"country_of_birth_father",
"country_of_birth_mother",
"country_of_birth_self",
"citizenship",
"own_business_or_self_employed",
"fill_inc_questionnaire_for_veteran's_admin",
"veterans_benefits",
"year",
]
```
Now we perform basic data preparation.
```python
def prepare_dataframe(dataframe):
# Convert the target labels from string to integer.
dataframe[TARGET_COLUMN_NAME] = dataframe[TARGET_COLUMN_NAME].map(
TARGET_LABELS.index
)
# Cast the categorical features to string.
for feature_name in CATEGORICAL_FEATURE_NAMES:
dataframe[feature_name] = dataframe[feature_name].astype(str)
prepare_dataframe(train_data)
prepare_dataframe(test_data)
```
Now let's show the shapes of the training and test dataframes, and display some instances.
```python
print(f"Train data shape: {train_data.shape}")
print(f"Test data shape: {test_data.shape}")
print(train_data.head().T)
```
<div class="k-default-codeblock">
```
Train data shape: (199523, 42)
Test data shape: (99762, 42)
0 \
age 73
class_of_worker Not in universe
detailed_industry_recode 0
detailed_occupation_recode 0
education High school graduate
wage_per_hour 0
enroll_in_edu_inst_last_wk Not in universe
marital_stat Widowed
major_industry_code Not in universe or children
major_occupation_code Not in universe
race White
hispanic_origin All other
sex Female
member_of_a_labor_union Not in universe
reason_for_unemployment Not in universe
full_or_part_time_employment_stat Not in labor force
capital_gains 0
capital_losses 0
dividends_from_stocks 0
tax_filer_stat Nonfiler
region_of_previous_residence Not in universe
state_of_previous_residence Not in universe
detailed_household_and_family_stat Other Rel 18+ ever marr not in subfamily
detailed_household_summary_in_household Other relative of householder
instance_weight 1700.09
migration_code-change_in_msa ?
migration_code-change_in_reg ?
migration_code-move_within_reg ?
live_in_this_house_1_year_ago Not in universe under 1 year old
migration_prev_res_in_sunbelt ?
num_persons_worked_for_employer 0
family_members_under_18 Not in universe
country_of_birth_father United-States
country_of_birth_mother United-States
country_of_birth_self United-States
citizenship Native- Born in the United States
own_business_or_self_employed 0
fill_inc_questionnaire_for_veteran's_admin Not in universe
veterans_benefits 2
weeks_worked_in_year 0
year 95
income_level 0
```
</div>
<div class="k-default-codeblock">
```
1 \
age 58
class_of_worker Self-employed-not incorporated
detailed_industry_recode 4
detailed_occupation_recode 34
education Some college but no degree
wage_per_hour 0
enroll_in_edu_inst_last_wk Not in universe
marital_stat Divorced
major_industry_code Construction
major_occupation_code Precision production craft & repair
race White
hispanic_origin All other
sex Male
member_of_a_labor_union Not in universe
reason_for_unemployment Not in universe
full_or_part_time_employment_stat Children or Armed Forces
capital_gains 0
capital_losses 0
dividends_from_stocks 0
tax_filer_stat Head of household
region_of_previous_residence South
state_of_previous_residence Arkansas
detailed_household_and_family_stat Householder
detailed_household_summary_in_household Householder
instance_weight 1053.55
migration_code-change_in_msa MSA to MSA
migration_code-change_in_reg Same county
migration_code-move_within_reg Same county
live_in_this_house_1_year_ago No
migration_prev_res_in_sunbelt Yes
num_persons_worked_for_employer 1
family_members_under_18 Not in universe
country_of_birth_father United-States
country_of_birth_mother United-States
country_of_birth_self United-States
citizenship Native- Born in the United States
own_business_or_self_employed 0
fill_inc_questionnaire_for_veteran's_admin Not in universe
veterans_benefits 2
weeks_worked_in_year 52
year 94
income_level 0
```
</div>
<div class="k-default-codeblock">
```
2 \
age 18
class_of_worker Not in universe
detailed_industry_recode 0
detailed_occupation_recode 0
education 10th grade
wage_per_hour 0
enroll_in_edu_inst_last_wk High school
marital_stat Never married
major_industry_code Not in universe or children
major_occupation_code Not in universe
race Asian or Pacific Islander
hispanic_origin All other
sex Female
member_of_a_labor_union Not in universe
reason_for_unemployment Not in universe
full_or_part_time_employment_stat Not in labor force
capital_gains 0
capital_losses 0
dividends_from_stocks 0
tax_filer_stat Nonfiler
region_of_previous_residence Not in universe
state_of_previous_residence Not in universe
detailed_household_and_family_stat Child 18+ never marr Not in a subfamily
detailed_household_summary_in_household Child 18 or older
instance_weight 991.95
migration_code-change_in_msa ?
migration_code-change_in_reg ?
migration_code-move_within_reg ?
live_in_this_house_1_year_ago Not in universe under 1 year old
migration_prev_res_in_sunbelt ?
num_persons_worked_for_employer 0
family_members_under_18 Not in universe
country_of_birth_father Vietnam
country_of_birth_mother Vietnam
country_of_birth_self Vietnam
citizenship Foreign born- Not a citizen of U S
own_business_or_self_employed 0
fill_inc_questionnaire_for_veteran's_admin Not in universe
veterans_benefits 2
weeks_worked_in_year 0
year 95
income_level 0
```
</div>
<div class="k-default-codeblock">
```
3 \
age 9
class_of_worker Not in universe
detailed_industry_recode 0
detailed_occupation_recode 0
education Children
wage_per_hour 0
enroll_in_edu_inst_last_wk Not in universe
marital_stat Never married
major_industry_code Not in universe or children
major_occupation_code Not in universe
race White
hispanic_origin All other
sex Female
member_of_a_labor_union Not in universe
reason_for_unemployment Not in universe
full_or_part_time_employment_stat Children or Armed Forces
capital_gains 0
capital_losses 0
dividends_from_stocks 0
tax_filer_stat Nonfiler
region_of_previous_residence Not in universe
state_of_previous_residence Not in universe
detailed_household_and_family_stat Child <18 never marr not in subfamily
detailed_household_summary_in_household Child under 18 never married
instance_weight 1758.14
migration_code-change_in_msa Nonmover
migration_code-change_in_reg Nonmover
migration_code-move_within_reg Nonmover
live_in_this_house_1_year_ago Yes
migration_prev_res_in_sunbelt Not in universe
num_persons_worked_for_employer 0
family_members_under_18 Both parents present
country_of_birth_father United-States
country_of_birth_mother United-States
country_of_birth_self United-States
citizenship Native- Born in the United States
own_business_or_self_employed 0
fill_inc_questionnaire_for_veteran's_admin Not in universe
veterans_benefits 0
weeks_worked_in_year 0
year 94
income_level 0
```
</div>
<div class="k-default-codeblock">
```
4
age 10
class_of_worker Not in universe
detailed_industry_recode 0
detailed_occupation_recode 0
education Children
wage_per_hour 0
enroll_in_edu_inst_last_wk Not in universe
marital_stat Never married
major_industry_code Not in universe or children
major_occupation_code Not in universe
race White
hispanic_origin All other
sex Female
member_of_a_labor_union Not in universe
reason_for_unemployment Not in universe
full_or_part_time_employment_stat Children or Armed Forces
capital_gains 0
capital_losses 0
dividends_from_stocks 0
tax_filer_stat Nonfiler
region_of_previous_residence Not in universe
state_of_previous_residence Not in universe
detailed_household_and_family_stat Child <18 never marr not in subfamily
detailed_household_summary_in_household Child under 18 never married
instance_weight 1069.16
migration_code-change_in_msa Nonmover
migration_code-change_in_reg Nonmover
migration_code-move_within_reg Nonmover
live_in_this_house_1_year_ago Yes
migration_prev_res_in_sunbelt Not in universe
num_persons_worked_for_employer 0
family_members_under_18 Both parents present
country_of_birth_father United-States
country_of_birth_mother United-States
country_of_birth_self United-States
citizenship Native- Born in the United States
own_business_or_self_employed 0
fill_inc_questionnaire_for_veteran's_admin Not in universe
veterans_benefits 0
weeks_worked_in_year 0
year 94
income_level 0
```
</div>
---
## Configure hyperparameters
You can find all the parameters of the Gradient Boosted Tree model in the
[documentation](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel).
```python
# Maximum number of decision trees. The effective number of trained trees can be smaller if early stopping is enabled.
NUM_TREES = 250
# Minimum number of examples in a node.
MIN_EXAMPLES = 6
# Maximum depth of the tree. max_depth=1 means that all trees will be roots.
MAX_DEPTH = 5
# Ratio of the dataset (sampling without replacement) used to train individual trees for the random sampling method.
SUBSAMPLE = 0.65
# Control the sampling of the datasets used to train individual trees.
SAMPLING_METHOD = "RANDOM"
# Ratio of the training dataset used to monitor the training. Required to be >0 if early stopping is enabled.
VALIDATION_RATIO = 0.1
```
---
## Implement a training and evaluation procedure
The `run_experiment()` method is responsible for loading the train and test datasets,
training a given model, and evaluating the trained model.
Note that when training a Decision Forests model, only one epoch is needed to
read the full dataset. Any extra steps result in unnecessarily slower training.
Therefore, the default `num_epochs=1` is used in the `run_experiment()` method.
```python
def run_experiment(model, train_data, test_data, num_epochs=1, batch_size=None):
train_dataset = tfdf.keras.pd_dataframe_to_tf_dataset(
train_data, label=TARGET_COLUMN_NAME, weight=WEIGHT_COLUMN_NAME
)
test_dataset = tfdf.keras.pd_dataframe_to_tf_dataset(
test_data, label=TARGET_COLUMN_NAME, weight=WEIGHT_COLUMN_NAME
)
model.fit(train_dataset, epochs=num_epochs, batch_size=batch_size)
_, accuracy = model.evaluate(test_dataset, verbose=0)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
```
---
## Experiment 1: Decision Forests with raw features
### Specify model input feature usages
You can attach semantics to each feature to control how it is used by the model.
If not specified, the semantics are inferred from the representation type.
It is recommended to specify the [feature usages](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/FeatureUsage)
explicitly to avoid incorrectly inferred semantics.
For example, a categorical value identifier (integer) will be inferred as numerical,
while it is semantically categorical.
For numerical features, you can set the `discretized` parameter to the number
of buckets into which the numerical feature should be discretized.
This makes training faster, but may lead to worse models.
```python
def specify_feature_usages():
feature_usages = []
for feature_name in NUMERIC_FEATURE_NAMES:
feature_usage = tfdf.keras.FeatureUsage(
name=feature_name, semantic=tfdf.keras.FeatureSemantic.NUMERICAL
)
feature_usages.append(feature_usage)
for feature_name in CATEGORICAL_FEATURE_NAMES:
feature_usage = tfdf.keras.FeatureUsage(
name=feature_name, semantic=tfdf.keras.FeatureSemantic.CATEGORICAL
)
feature_usages.append(feature_usage)
return feature_usages
```
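As a side note on the `discretized` option mentioned above, a numerical feature could be
bucketized roughly as follows. This is a hypothetical sketch that is not used in the
experiments below; see the `FeatureUsage` documentation linked earlier for the exact
argument semantics in your TF-DF version:
```python
discretized_age = tfdf.keras.FeatureUsage(
    name="age",
    semantic=tfdf.keras.FeatureSemantic.NUMERICAL,
    discretized=255,  # number of buckets used to discretize the feature
)
```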
### Create a Gradient Boosted Trees model
When compiling a decision forests model, you may only provide extra evaluation metrics.
The loss is specified in the model construction,
and the optimizer is irrelevant to decision forests models.
```python
def create_gbt_model():
# See all the model parameters in https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel
gbt_model = tfdf.keras.GradientBoostedTreesModel(
features=specify_feature_usages(),
exclude_non_specified_features=True,
num_trees=NUM_TREES,
max_depth=MAX_DEPTH,
min_examples=MIN_EXAMPLES,
subsample=SUBSAMPLE,
validation_ratio=VALIDATION_RATIO,
task=tfdf.keras.Task.CLASSIFICATION,
)
gbt_model.compile(metrics=[keras.metrics.BinaryAccuracy(name="accuracy")])
return gbt_model
```
### Train and evaluate the model
```python
gbt_model = create_gbt_model()
run_experiment(gbt_model, train_data, test_data)
```
<div class="k-default-codeblock">
```
Starting reading the dataset
200/200 [==============================] - ETA: 0s
Dataset read in 0:00:08.829036
Training model
Model trained in 0:00:48.639771
Compiling model
200/200 [==============================] - 58s 268ms/step
Test accuracy: 95.79%
```
</div>
### Inspect the model
The `model.summary()` method will display several types of information about
your decision trees model, including the model type, task, input features, and feature importance.
```python
print(gbt_model.summary())
```
<div class="k-default-codeblock">
```
Model: "gradient_boosted_trees_model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
=================================================================
Total params: 1
Trainable params: 0
Non-trainable params: 1
_________________________________________________________________
Type: "GRADIENT_BOOSTED_TREES"
Task: CLASSIFICATION
Label: "__LABEL"
```
</div>
<div class="k-default-codeblock">
```
Input Features (40):
age
capital_gains
capital_losses
citizenship
class_of_worker
country_of_birth_father
country_of_birth_mother
country_of_birth_self
detailed_household_and_family_stat
detailed_household_summary_in_household
detailed_industry_recode
detailed_occupation_recode
dividends_from_stocks
education
enroll_in_edu_inst_last_wk
family_members_under_18
fill_inc_questionnaire_for_veteran's_admin
full_or_part_time_employment_stat
hispanic_origin
live_in_this_house_1_year_ago
major_industry_code
major_occupation_code
marital_stat
member_of_a_labor_union
migration_code-change_in_msa
migration_code-change_in_reg
migration_code-move_within_reg
migration_prev_res_in_sunbelt
num_persons_worked_for_employer
own_business_or_self_employed
race
reason_for_unemployment
region_of_previous_residence
sex
state_of_previous_residence
tax_filer_stat
veterans_benefits
wage_per_hour
weeks_worked_in_year
year
```
</div>
<div class="k-default-codeblock">
```
Trained with weights
```
</div>
<div class="k-default-codeblock">
```
Variable Importance: MEAN_MIN_DEPTH:
1. "enroll_in_edu_inst_last_wk" 3.942647 ################
2. "family_members_under_18" 3.942647 ################
3. "live_in_this_house_1_year_ago" 3.942647 ################
4. "migration_code-change_in_msa" 3.942647 ################
5. "migration_code-move_within_reg" 3.942647 ################
6. "year" 3.942647 ################
7. "__LABEL" 3.942647 ################
8. "__WEIGHTS" 3.942647 ################
9. "citizenship" 3.942137 ###############
10. "detailed_household_summary_in_household" 3.942137 ###############
11. "region_of_previous_residence" 3.942137 ###############
12. "veterans_benefits" 3.942137 ###############
13. "migration_prev_res_in_sunbelt" 3.940135 ###############
14. "migration_code-change_in_reg" 3.939926 ###############
15. "major_occupation_code" 3.937681 ###############
16. "major_industry_code" 3.933687 ###############
17. "reason_for_unemployment" 3.926320 ###############
18. "hispanic_origin" 3.900776 ###############
19. "member_of_a_labor_union" 3.894843 ###############
20. "race" 3.878617 ###############
21. "num_persons_worked_for_employer" 3.818566 ##############
22. "marital_stat" 3.795667 ##############
23. "full_or_part_time_employment_stat" 3.795431 ##############
24. "country_of_birth_mother" 3.787967 ##############
25. "tax_filer_stat" 3.784505 ##############
26. "fill_inc_questionnaire_for_veteran's_admin" 3.783607 ##############
27. "own_business_or_self_employed" 3.776398 ##############
28. "country_of_birth_father" 3.715252 #############
29. "sex" 3.708745 #############
30. "class_of_worker" 3.688424 #############
31. "weeks_worked_in_year" 3.665290 #############
32. "state_of_previous_residence" 3.657234 #############
33. "country_of_birth_self" 3.654377 #############
34. "age" 3.634295 ############
35. "wage_per_hour" 3.617817 ############
36. "detailed_household_and_family_stat" 3.594743 ############
37. "capital_losses" 3.439298 ##########
38. "dividends_from_stocks" 3.423652 ##########
39. "capital_gains" 3.222753 ########
40. "education" 3.158698 ########
41. "detailed_industry_recode" 2.981471 ######
42. "detailed_occupation_recode" 2.364817
```
</div>
<div class="k-default-codeblock">
```
Variable Importance: NUM_AS_ROOT:
1. "education" 33.000000 ################
2. "capital_gains" 29.000000 ##############
3. "capital_losses" 24.000000 ###########
4. "detailed_household_and_family_stat" 14.000000 ######
5. "dividends_from_stocks" 14.000000 ######
6. "wage_per_hour" 12.000000 #####
7. "country_of_birth_self" 11.000000 #####
8. "detailed_occupation_recode" 11.000000 #####
9. "weeks_worked_in_year" 11.000000 #####
10. "age" 10.000000 ####
11. "state_of_previous_residence" 10.000000 ####
12. "fill_inc_questionnaire_for_veteran's_admin" 9.000000 ####
13. "class_of_worker" 8.000000 ###
14. "full_or_part_time_employment_stat" 8.000000 ###
15. "marital_stat" 8.000000 ###
16. "own_business_or_self_employed" 8.000000 ###
17. "sex" 6.000000 ##
18. "tax_filer_stat" 5.000000 ##
19. "country_of_birth_father" 4.000000 #
20. "race" 3.000000 #
21. "detailed_industry_recode" 2.000000
22. "hispanic_origin" 2.000000
23. "country_of_birth_mother" 1.000000
24. "num_persons_worked_for_employer" 1.000000
25. "reason_for_unemployment" 1.000000
```
</div>
<div class="k-default-codeblock">
```
Variable Importance: NUM_NODES:
1. "detailed_occupation_recode" 785.000000 ################
2. "detailed_industry_recode" 668.000000 #############
3. "capital_gains" 275.000000 #####
4. "dividends_from_stocks" 220.000000 ####
5. "capital_losses" 197.000000 ####
6. "education" 178.000000 ###
7. "country_of_birth_mother" 128.000000 ##
8. "country_of_birth_father" 116.000000 ##
9. "age" 114.000000 ##
10. "wage_per_hour" 98.000000 #
11. "state_of_previous_residence" 95.000000 #
12. "detailed_household_and_family_stat" 78.000000 #
13. "class_of_worker" 67.000000 #
14. "country_of_birth_self" 65.000000 #
15. "sex" 65.000000 #
16. "weeks_worked_in_year" 60.000000 #
17. "tax_filer_stat" 57.000000 #
18. "num_persons_worked_for_employer" 54.000000 #
19. "own_business_or_self_employed" 30.000000
20. "marital_stat" 26.000000
21. "member_of_a_labor_union" 16.000000
22. "fill_inc_questionnaire_for_veteran's_admin" 15.000000
23. "full_or_part_time_employment_stat" 15.000000
24. "major_industry_code" 15.000000
25. "hispanic_origin" 9.000000
26. "major_occupation_code" 7.000000
27. "race" 7.000000
28. "citizenship" 1.000000
29. "detailed_household_summary_in_household" 1.000000
30. "migration_code-change_in_reg" 1.000000
31. "migration_prev_res_in_sunbelt" 1.000000
32. "reason_for_unemployment" 1.000000
33. "region_of_previous_residence" 1.000000
34. "veterans_benefits" 1.000000
```
</div>
<div class="k-default-codeblock">
```
Variable Importance: SUM_SCORE:
1. "detailed_occupation_recode" 15392441.075369 ################
2. "capital_gains" 5277826.822514 #####
3. "education" 4751749.289550 ####
4. "dividends_from_stocks" 3792002.951255 ###
5. "detailed_industry_recode" 2882200.882109 ##
6. "sex" 2559417.877325 ##
7. "age" 2042990.944829 ##
8. "capital_losses" 1735728.772551 #
9. "weeks_worked_in_year" 1272820.203971 #
10. "tax_filer_stat" 697890.160846
11. "num_persons_worked_for_employer" 671351.905595
12. "detailed_household_and_family_stat" 444620.829557
13. "class_of_worker" 362250.565331
14. "country_of_birth_mother" 296311.574426
15. "country_of_birth_father" 258198.889206
16. "wage_per_hour" 239764.219048
17. "state_of_previous_residence" 237687.602572
18. "country_of_birth_self" 103002.168158
19. "marital_stat" 102449.735314
20. "own_business_or_self_employed" 82938.893541
21. "fill_inc_questionnaire_for_veteran's_admin" 22692.700206
22. "full_or_part_time_employment_stat" 19078.398837
23. "major_industry_code" 18450.345505
24. "member_of_a_labor_union" 14905.360879
25. "hispanic_origin" 12602.867902
26. "major_occupation_code" 8709.665989
27. "race" 6116.282065
28. "citizenship" 3291.490393
29. "detailed_household_summary_in_household" 2733.439375
30. "veterans_benefits" 1230.940488
31. "region_of_previous_residence" 1139.240981
32. "reason_for_unemployment" 219.245124
33. "migration_code-change_in_reg" 55.806436
34. "migration_prev_res_in_sunbelt" 37.780635
```
</div>
<div class="k-default-codeblock">
```
Loss: BINOMIAL_LOG_LIKELIHOOD
Validation loss value: 0.228983
Number of trees per iteration: 1
Node format: NOT_SET
Number of trees: 245
Total number of nodes: 7179
```
</div>
<div class="k-default-codeblock">
```
Number of nodes by tree:
Count: 245 Average: 29.302 StdDev: 2.96211
Min: 17 Max: 31 Ignored: 0
----------------------------------------------
[ 17, 18) 2 0.82% 0.82%
[ 18, 19) 0 0.00% 0.82%
[ 19, 20) 3 1.22% 2.04%
[ 20, 21) 0 0.00% 2.04%
[ 21, 22) 4 1.63% 3.67%
[ 22, 23) 0 0.00% 3.67%
[ 23, 24) 15 6.12% 9.80% #
[ 24, 25) 0 0.00% 9.80%
[ 25, 26) 5 2.04% 11.84%
[ 26, 27) 0 0.00% 11.84%
[ 27, 28) 21 8.57% 20.41% #
[ 28, 29) 0 0.00% 20.41%
[ 29, 30) 39 15.92% 36.33% ###
[ 30, 31) 0 0.00% 36.33%
[ 31, 31] 156 63.67% 100.00% ##########
```
</div>
<div class="k-default-codeblock">
```
Depth by leafs:
Count: 3712 Average: 3.95259 StdDev: 0.249814
Min: 2 Max: 4 Ignored: 0
----------------------------------------------
[ 2, 3) 32 0.86% 0.86%
[ 3, 4) 112 3.02% 3.88%
[ 4, 4] 3568 96.12% 100.00% ##########
```
</div>
<div class="k-default-codeblock">
```
Number of training obs by leaf:
Count: 3712 Average: 11849.3 StdDev: 33719.3
Min: 6 Max: 179360 Ignored: 0
----------------------------------------------
[ 6, 8973) 3100 83.51% 83.51% ##########
[ 8973, 17941) 148 3.99% 87.50%
[ 17941, 26909) 79 2.13% 89.63%
[ 26909, 35877) 36 0.97% 90.60%
[ 35877, 44844) 44 1.19% 91.78%
[ 44844, 53812) 17 0.46% 92.24%
[ 53812, 62780) 20 0.54% 92.78%
[ 62780, 71748) 39 1.05% 93.83%
[ 71748, 80715) 24 0.65% 94.48%
[ 80715, 89683) 12 0.32% 94.80%
[ 89683, 98651) 22 0.59% 95.39%
[ 98651, 107619) 21 0.57% 95.96%
[ 107619, 116586) 17 0.46% 96.42%
[ 116586, 125554) 17 0.46% 96.88%
[ 125554, 134522) 13 0.35% 97.23%
[ 134522, 143490) 8 0.22% 97.44%
[ 143490, 152457) 5 0.13% 97.58%
[ 152457, 161425) 6 0.16% 97.74%
[ 161425, 170393) 15 0.40% 98.14%
[ 170393, 179360] 69 1.86% 100.00%
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes:
785 : detailed_occupation_recode [CATEGORICAL]
668 : detailed_industry_recode [CATEGORICAL]
275 : capital_gains [NUMERICAL]
220 : dividends_from_stocks [NUMERICAL]
197 : capital_losses [NUMERICAL]
178 : education [CATEGORICAL]
128 : country_of_birth_mother [CATEGORICAL]
116 : country_of_birth_father [CATEGORICAL]
114 : age [NUMERICAL]
98 : wage_per_hour [NUMERICAL]
95 : state_of_previous_residence [CATEGORICAL]
78 : detailed_household_and_family_stat [CATEGORICAL]
67 : class_of_worker [CATEGORICAL]
65 : sex [CATEGORICAL]
65 : country_of_birth_self [CATEGORICAL]
60 : weeks_worked_in_year [NUMERICAL]
57 : tax_filer_stat [CATEGORICAL]
54 : num_persons_worked_for_employer [NUMERICAL]
30 : own_business_or_self_employed [CATEGORICAL]
26 : marital_stat [CATEGORICAL]
16 : member_of_a_labor_union [CATEGORICAL]
15 : major_industry_code [CATEGORICAL]
15 : full_or_part_time_employment_stat [CATEGORICAL]
15 : fill_inc_questionnaire_for_veteran's_admin [CATEGORICAL]
9 : hispanic_origin [CATEGORICAL]
7 : race [CATEGORICAL]
7 : major_occupation_code [CATEGORICAL]
1 : veterans_benefits [CATEGORICAL]
1 : region_of_previous_residence [CATEGORICAL]
1 : reason_for_unemployment [CATEGORICAL]
1 : migration_prev_res_in_sunbelt [CATEGORICAL]
1 : migration_code-change_in_reg [CATEGORICAL]
1 : detailed_household_summary_in_household [CATEGORICAL]
1 : citizenship [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes with depth <= 0:
33 : education [CATEGORICAL]
29 : capital_gains [NUMERICAL]
24 : capital_losses [NUMERICAL]
14 : dividends_from_stocks [NUMERICAL]
14 : detailed_household_and_family_stat [CATEGORICAL]
12 : wage_per_hour [NUMERICAL]
11 : weeks_worked_in_year [NUMERICAL]
11 : detailed_occupation_recode [CATEGORICAL]
11 : country_of_birth_self [CATEGORICAL]
10 : state_of_previous_residence [CATEGORICAL]
10 : age [NUMERICAL]
9 : fill_inc_questionnaire_for_veteran's_admin [CATEGORICAL]
8 : own_business_or_self_employed [CATEGORICAL]
8 : marital_stat [CATEGORICAL]
8 : full_or_part_time_employment_stat [CATEGORICAL]
8 : class_of_worker [CATEGORICAL]
6 : sex [CATEGORICAL]
5 : tax_filer_stat [CATEGORICAL]
4 : country_of_birth_father [CATEGORICAL]
3 : race [CATEGORICAL]
2 : hispanic_origin [CATEGORICAL]
2 : detailed_industry_recode [CATEGORICAL]
1 : reason_for_unemployment [CATEGORICAL]
1 : num_persons_worked_for_employer [NUMERICAL]
1 : country_of_birth_mother [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes with depth <= 1:
140 : detailed_occupation_recode [CATEGORICAL]
82 : capital_gains [NUMERICAL]
65 : capital_losses [NUMERICAL]
62 : education [CATEGORICAL]
59 : detailed_industry_recode [CATEGORICAL]
47 : dividends_from_stocks [NUMERICAL]
31 : wage_per_hour [NUMERICAL]
26 : detailed_household_and_family_stat [CATEGORICAL]
23 : age [NUMERICAL]
22 : state_of_previous_residence [CATEGORICAL]
21 : country_of_birth_self [CATEGORICAL]
21 : class_of_worker [CATEGORICAL]
20 : weeks_worked_in_year [NUMERICAL]
20 : sex [CATEGORICAL]
15 : country_of_birth_father [CATEGORICAL]
12 : own_business_or_self_employed [CATEGORICAL]
11 : fill_inc_questionnaire_for_veteran's_admin [CATEGORICAL]
10 : num_persons_worked_for_employer [NUMERICAL]
9 : tax_filer_stat [CATEGORICAL]
9 : full_or_part_time_employment_stat [CATEGORICAL]
8 : marital_stat [CATEGORICAL]
8 : country_of_birth_mother [CATEGORICAL]
6 : member_of_a_labor_union [CATEGORICAL]
5 : race [CATEGORICAL]
2 : hispanic_origin [CATEGORICAL]
1 : reason_for_unemployment [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes with depth <= 2:
399 : detailed_occupation_recode [CATEGORICAL]
249 : detailed_industry_recode [CATEGORICAL]
170 : capital_gains [NUMERICAL]
117 : dividends_from_stocks [NUMERICAL]
116 : capital_losses [NUMERICAL]
87 : education [CATEGORICAL]
59 : wage_per_hour [NUMERICAL]
45 : detailed_household_and_family_stat [CATEGORICAL]
43 : country_of_birth_father [CATEGORICAL]
43 : age [NUMERICAL]
40 : country_of_birth_self [CATEGORICAL]
38 : state_of_previous_residence [CATEGORICAL]
38 : class_of_worker [CATEGORICAL]
37 : sex [CATEGORICAL]
36 : weeks_worked_in_year [NUMERICAL]
33 : country_of_birth_mother [CATEGORICAL]
28 : num_persons_worked_for_employer [NUMERICAL]
26 : tax_filer_stat [CATEGORICAL]
14 : own_business_or_self_employed [CATEGORICAL]
14 : marital_stat [CATEGORICAL]
12 : full_or_part_time_employment_stat [CATEGORICAL]
12 : fill_inc_questionnaire_for_veteran's_admin [CATEGORICAL]
8 : member_of_a_labor_union [CATEGORICAL]
6 : race [CATEGORICAL]
6 : hispanic_origin [CATEGORICAL]
2 : major_occupation_code [CATEGORICAL]
2 : major_industry_code [CATEGORICAL]
1 : reason_for_unemployment [CATEGORICAL]
1 : migration_prev_res_in_sunbelt [CATEGORICAL]
1 : migration_code-change_in_reg [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes with depth <= 3:
785 : detailed_occupation_recode [CATEGORICAL]
668 : detailed_industry_recode [CATEGORICAL]
275 : capital_gains [NUMERICAL]
220 : dividends_from_stocks [NUMERICAL]
197 : capital_losses [NUMERICAL]
178 : education [CATEGORICAL]
128 : country_of_birth_mother [CATEGORICAL]
116 : country_of_birth_father [CATEGORICAL]
114 : age [NUMERICAL]
98 : wage_per_hour [NUMERICAL]
95 : state_of_previous_residence [CATEGORICAL]
78 : detailed_household_and_family_stat [CATEGORICAL]
67 : class_of_worker [CATEGORICAL]
65 : sex [CATEGORICAL]
65 : country_of_birth_self [CATEGORICAL]
60 : weeks_worked_in_year [NUMERICAL]
57 : tax_filer_stat [CATEGORICAL]
54 : num_persons_worked_for_employer [NUMERICAL]
30 : own_business_or_self_employed [CATEGORICAL]
26 : marital_stat [CATEGORICAL]
16 : member_of_a_labor_union [CATEGORICAL]
15 : major_industry_code [CATEGORICAL]
15 : full_or_part_time_employment_stat [CATEGORICAL]
15 : fill_inc_questionnaire_for_veteran's_admin [CATEGORICAL]
9 : hispanic_origin [CATEGORICAL]
7 : race [CATEGORICAL]
7 : major_occupation_code [CATEGORICAL]
1 : veterans_benefits [CATEGORICAL]
1 : region_of_previous_residence [CATEGORICAL]
1 : reason_for_unemployment [CATEGORICAL]
1 : migration_prev_res_in_sunbelt [CATEGORICAL]
1 : migration_code-change_in_reg [CATEGORICAL]
1 : detailed_household_summary_in_household [CATEGORICAL]
1 : citizenship [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Attribute in nodes with depth <= 5:
785 : detailed_occupation_recode [CATEGORICAL]
668 : detailed_industry_recode [CATEGORICAL]
275 : capital_gains [NUMERICAL]
220 : dividends_from_stocks [NUMERICAL]
197 : capital_losses [NUMERICAL]
178 : education [CATEGORICAL]
128 : country_of_birth_mother [CATEGORICAL]
116 : country_of_birth_father [CATEGORICAL]
114 : age [NUMERICAL]
98 : wage_per_hour [NUMERICAL]
95 : state_of_previous_residence [CATEGORICAL]
78 : detailed_household_and_family_stat [CATEGORICAL]
67 : class_of_worker [CATEGORICAL]
65 : sex [CATEGORICAL]
65 : country_of_birth_self [CATEGORICAL]
60 : weeks_worked_in_year [NUMERICAL]
57 : tax_filer_stat [CATEGORICAL]
54 : num_persons_worked_for_employer [NUMERICAL]
30 : own_business_or_self_employed [CATEGORICAL]
26 : marital_stat [CATEGORICAL]
16 : member_of_a_labor_union [CATEGORICAL]
15 : major_industry_code [CATEGORICAL]
15 : full_or_part_time_employment_stat [CATEGORICAL]
15 : fill_inc_questionnaire_for_veteran's_admin [CATEGORICAL]
9 : hispanic_origin [CATEGORICAL]
7 : race [CATEGORICAL]
7 : major_occupation_code [CATEGORICAL]
1 : veterans_benefits [CATEGORICAL]
1 : region_of_previous_residence [CATEGORICAL]
1 : reason_for_unemployment [CATEGORICAL]
1 : migration_prev_res_in_sunbelt [CATEGORICAL]
1 : migration_code-change_in_reg [CATEGORICAL]
1 : detailed_household_summary_in_household [CATEGORICAL]
1 : citizenship [CATEGORICAL]
```
</div>
<div class="k-default-codeblock">
```
Condition type in nodes:
2418 : ContainsBitmapCondition
1018 : HigherCondition
31 : ContainsCondition
Condition type in nodes with depth <= 0:
137 : ContainsBitmapCondition
101 : HigherCondition
7 : ContainsCondition
Condition type in nodes with depth <= 1:
448 : ContainsBitmapCondition
278 : HigherCondition
9 : ContainsCondition
Condition type in nodes with depth <= 2:
1097 : ContainsBitmapCondition
569 : HigherCondition
17 : ContainsCondition
Condition type in nodes with depth <= 3:
2418 : ContainsBitmapCondition
1018 : HigherCondition
31 : ContainsCondition
Condition type in nodes with depth <= 5:
2418 : ContainsBitmapCondition
1018 : HigherCondition
31 : ContainsCondition
```
</div>
<div class="k-default-codeblock">
```
None
```
</div>
---
## Experiment 2: Decision Forests with target encoding
[Target encoding](https://dl.acm.org/doi/10.1145/507533.507538) is a common preprocessing
technique for categorical features that converts them into numerical features.
Using categorical features with high cardinality as-is may lead to overfitting.
Target encoding aims to replace each categorical feature value with one or more
numerical values that represent its co-occurrence with the target labels.
More precisely, given a categorical feature, the binary target encoder in this example
will produce three new numerical features:
1. `positive_frequency`: How many times each feature value occurred with a positive target label.
2. `negative_frequency`: How many times each feature value occurred with a negative target label.
3. `positive_probability`: The probability that the target label is positive,
given the feature value, which is computed as
`positive_frequency / (positive_frequency + negative_frequency + correction)`.
The `correction` term is added to make the division more stable for rare categorical values.
The default value for `correction` is 1.0.
Note that target encoding is effective with models that cannot automatically
learn dense representations of categorical features, such as decision forests
or kernel methods. If neural network models are used, it is recommended to
encode categorical features as embeddings.
### Implement Binary Target Encoder
For simplicity, we assume that the inputs for the `adapt` and `call` methods
are in the expected data types and shapes, so no validation logic is added.
It is recommended to pass the `vocabulary_size` of the categorical feature to the
`BinaryTargetEncoding` constructor. If not specified, it will be computed during
the `adapt()` method execution.
```python
class BinaryTargetEncoding(layers.Layer):
def __init__(self, vocabulary_size=None, correction=1.0, **kwargs):
super().__init__(**kwargs)
self.vocabulary_size = vocabulary_size
        self.correction = correction
        # Computed by `adapt()` and used by `call()`.
        self.target_encoding_statistics = None
def adapt(self, data):
        # data is expected to be an integer NumPy array or a Tensor of shape [num_examples, 2].
        # It contains the feature values for a given feature in the dataset, and the target values.
# Convert the data to a tensor.
data = tf.convert_to_tensor(data)
# Separate the feature values and target values
feature_values = tf.cast(data[:, 0], tf.dtypes.int32)
target_values = tf.cast(data[:, 1], tf.dtypes.bool)
        # Compute the vocabulary_size if not specified.
if self.vocabulary_size is None:
self.vocabulary_size = tf.unique(feature_values).y.shape[0]
# Filter the data where the target label is positive.
positive_indices = tf.where(condition=target_values)
postive_feature_values = tf.gather_nd(
params=feature_values, indices=positive_indices
)
# Compute how many times each feature value occurred with a positive target label.
positive_frequency = tf.math.unsorted_segment_sum(
data=tf.ones(
shape=(postive_feature_values.shape[0], 1), dtype=tf.dtypes.float64
),
segment_ids=postive_feature_values,
num_segments=self.vocabulary_size,
)
# Filter the data where the target label is negative.
negative_indices = tf.where(condition=tf.math.logical_not(target_values))
negative_feature_values = tf.gather_nd(
params=feature_values, indices=negative_indices
)
# Compute how many times each feature value occurred with a negative target label.
negative_frequency = tf.math.unsorted_segment_sum(
data=tf.ones(
shape=(negative_feature_values.shape[0], 1), dtype=tf.dtypes.float64
),
segment_ids=negative_feature_values,
num_segments=self.vocabulary_size,
)
# Compute positive probability for the input feature values.
positive_probability = positive_frequency / (
positive_frequency + negative_frequency + self.correction
)
        # Concatenate the computed statistics for target encoding.
target_encoding_statistics = tf.cast(
tf.concat(
[positive_frequency, negative_frequency, positive_probability], axis=1
),
dtype=tf.dtypes.float32,
)
self.target_encoding_statistics = tf.constant(target_encoding_statistics)
def call(self, inputs):
        # inputs is expected to be an integer NumPy array or a Tensor of shape [num_examples, 1].
        # It contains the feature values for a given feature in the dataset.
        # Raise an error if the target encoding statistics have not been computed.
        if self.target_encoding_statistics is None:
            raise ValueError(
                "You need to call the adapt method to compute target encoding statistics."
            )
# Convert the inputs to a tensor.
inputs = tf.convert_to_tensor(inputs)
# Cast the inputs int64 a tensor.
inputs = tf.cast(inputs, tf.dtypes.int64)
# Lookup target encoding statistics for the input feature values.
target_encoding_statistics = tf.cast(
tf.gather_nd(self.target_encoding_statistics, inputs),
dtype=tf.dtypes.float32,
)
return target_encoding_statistics
```
Let's test the binary target encoder
```python
data = tf.constant(
[
[0, 1],
[2, 0],
[0, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0],
[0, 1],
[2, 1],
[1, 0],
[0, 1],
[2, 0],
[0, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0],
[0, 1],
[2, 0],
]
)
binary_target_encoder = BinaryTargetEncoding()
binary_target_encoder.adapt(data)
print(binary_target_encoder([[0], [1], [2]]))
```
<div class="k-default-codeblock">
```
tf.Tensor(
[[6. 0. 0.85714287]
[4. 3. 0.5 ]
[1. 5. 0.14285715]], shape=(3, 3), dtype=float32)
```
</div>
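To see where these numbers come from (a quick check by hand): feature value `0` appears 6 times,
always with a positive label, so its row is `[6, 0, 6 / (6 + 0 + 1) ≈ 0.857]`; value `1` appears with
4 positive and 3 negative labels, giving `4 / (4 + 3 + 1) = 0.5`; and value `2` appears with 1 positive
and 5 negative labels, giving `1 / (1 + 5 + 1) ≈ 0.143`.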
### Create model inputs
```python
def create_model_inputs():
inputs = {}
for feature_name in NUMERIC_FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype=tf.float32
)
for feature_name in CATEGORICAL_FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype=tf.string
)
return inputs
```
### Implement a feature encoding with target encoding
```python
def create_target_encoder():
inputs = create_model_inputs()
target_values = train_data[[TARGET_COLUMN_NAME]].to_numpy()
encoded_features = []
for feature_name in inputs:
if feature_name in CATEGORICAL_FEATURE_NAMES:
# Get the vocabulary of the categorical feature.
vocabulary = sorted(
[str(value) for value in list(train_data[feature_name].unique())]
)
            # Create a lookup to convert string values to integer indices.
# Since we are not using a mask token nor expecting any out of vocabulary
# (oov) token, we set mask_token to None and num_oov_indices to 0.
lookup = layers.StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=0
)
# Convert the string input values into integer indices.
value_indices = lookup(inputs[feature_name])
# Prepare the data to adapt the target encoding.
print("### Adapting target encoding for:", feature_name)
feature_values = train_data[[feature_name]].to_numpy().astype(str)
feature_value_indices = lookup(feature_values)
data = tf.concat([feature_value_indices, target_values], axis=1)
feature_encoder = BinaryTargetEncoding()
feature_encoder.adapt(data)
# Convert the feature value indices to target encoding representations.
encoded_feature = feature_encoder(tf.expand_dims(value_indices, -1))
else:
# Expand the dimensions of the numerical input feature and use it as-is.
encoded_feature = tf.expand_dims(inputs[feature_name], -1)
# Add the encoded feature to the list.
encoded_features.append(encoded_feature)
# Concatenate all the encoded features.
encoded_features = tf.concat(encoded_features, axis=1)
# Create and return a Keras model with encoded features as outputs.
return keras.Model(inputs=inputs, outputs=encoded_features)
```
### Create a Gradient Boosted Trees model with a preprocessor
In this scenario, we use the target encoding as a preprocessor for the Gradient Boosted Tree model,
and let the model infer the semantics of the input features.
```python
def create_gbt_with_preprocessor(preprocessor):
gbt_model = tfdf.keras.GradientBoostedTreesModel(
preprocessing=preprocessor,
num_trees=NUM_TREES,
max_depth=MAX_DEPTH,
min_examples=MIN_EXAMPLES,
subsample=SUBSAMPLE,
validation_ratio=VALIDATION_RATIO,
task=tfdf.keras.Task.CLASSIFICATION,
)
gbt_model.compile(metrics=[keras.metrics.BinaryAccuracy(name="accuracy")])
return gbt_model
```
### Train and evaluate the model
```python
gbt_model = create_gbt_with_preprocessor(create_target_encoder())
run_experiment(gbt_model, train_data, test_data)
```
<div class="k-default-codeblock">
```
### Adapting target encoding for: class_of_worker
### Adapting target encoding for: detailed_industry_recode
### Adapting target encoding for: detailed_occupation_recode
### Adapting target encoding for: education
### Adapting target encoding for: enroll_in_edu_inst_last_wk
### Adapting target encoding for: marital_stat
### Adapting target encoding for: major_industry_code
### Adapting target encoding for: major_occupation_code
### Adapting target encoding for: race
### Adapting target encoding for: hispanic_origin
### Adapting target encoding for: sex
### Adapting target encoding for: member_of_a_labor_union
### Adapting target encoding for: reason_for_unemployment
### Adapting target encoding for: full_or_part_time_employment_stat
### Adapting target encoding for: tax_filer_stat
### Adapting target encoding for: region_of_previous_residence
### Adapting target encoding for: state_of_previous_residence
### Adapting target encoding for: detailed_household_and_family_stat
### Adapting target encoding for: detailed_household_summary_in_household
### Adapting target encoding for: migration_code-change_in_msa
### Adapting target encoding for: migration_code-change_in_reg
### Adapting target encoding for: migration_code-move_within_reg
### Adapting target encoding for: live_in_this_house_1_year_ago
### Adapting target encoding for: migration_prev_res_in_sunbelt
### Adapting target encoding for: family_members_under_18
### Adapting target encoding for: country_of_birth_father
### Adapting target encoding for: country_of_birth_mother
### Adapting target encoding for: country_of_birth_self
### Adapting target encoding for: citizenship
### Adapting target encoding for: own_business_or_self_employed
### Adapting target encoding for: fill_inc_questionnaire_for_veteran's_admin
### Adapting target encoding for: veterans_benefits
### Adapting target encoding for: year
Use /tmp/tmpj_0h78ld as temporary training directory
Starting reading the dataset
198/200 [============================>.] - ETA: 0s
Dataset read in 0:00:06.793717
Training model
Model trained in 0:04:32.752691
Compiling model
200/200 [==============================] - 280s 1s/step
Test accuracy: 95.81%
```
</div>
---
## Experiment 3: Decision Forests with trained embeddings
In this scenario, we build an encoder model that encodes the categorical
features into embeddings, where the size of the embedding for a given categorical
feature is the square root of the size of its vocabulary.
We train these embeddings in a simple NN model through backpropagation.
After the embedding encoder is trained, we use it as a preprocessor to the
input features of a Gradient Boosted Tree model.
Note that the embeddings and a decision forest model cannot be trained
jointly in a single phase, since decision forest models do not train with backpropagation.
Rather, the embeddings have to be trained in an initial phase,
and then used as static inputs to the decision forest model.
### Implement feature encoding with embeddings
```python
def create_embedding_encoder(size=None):
inputs = create_model_inputs()
encoded_features = []
for feature_name in inputs:
if feature_name in CATEGORICAL_FEATURE_NAMES:
# Get the vocabulary of the categorical feature.
vocabulary = sorted(
[str(value) for value in list(train_data[feature_name].unique())]
)
# Create a lookup to convert string values to integer indices.
# Since we are not using a mask token nor expecting any out of vocabulary
# (oov) token, we set mask_token to None and num_oov_indices to 0.
lookup = layers.StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=0
)
# Convert the string input values into integer indices.
value_index = lookup(inputs[feature_name])
# Create an embedding layer with the specified dimensions
vocabulary_size = len(vocabulary)
embedding_size = int(math.sqrt(vocabulary_size))
feature_encoder = layers.Embedding(
input_dim=len(vocabulary), output_dim=embedding_size
)
# Convert the index values to embedding representations.
encoded_feature = feature_encoder(value_index)
else:
# Expand the dimensions of the numerical input feature and use it as-is.
encoded_feature = tf.expand_dims(inputs[feature_name], -1)
# Add the encoded feature to the list.
encoded_features.append(encoded_feature)
# Concatenate all the encoded features.
encoded_features = layers.concatenate(encoded_features, axis=1)
# Apply dropout.
encoded_features = layers.Dropout(rate=0.25)(encoded_features)
# Apply a non-linear projection.
encoded_features = layers.Dense(
units=size if size else encoded_features.shape[-1], activation="gelu"
)(encoded_features)
# Create and return a Keras model with encoded features as outputs.
return keras.Model(inputs=inputs, outputs=encoded_features)
```
### Build an NN model to train the embeddings
```python
def create_nn_model(encoder):
inputs = create_model_inputs()
embeddings = encoder(inputs)
output = layers.Dense(units=1, activation="sigmoid")(embeddings)
nn_model = keras.Model(inputs=inputs, outputs=output)
nn_model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy("accuracy")],
)
return nn_model
embedding_encoder = create_embedding_encoder(size=64)
run_experiment(
create_nn_model(embedding_encoder),
train_data,
test_data,
num_epochs=5,
batch_size=256,
)
```
<div class="k-default-codeblock">
```
Epoch 1/5
200/200 [==============================] - 10s 27ms/step - loss: 8303.1455 - accuracy: 0.9193
Epoch 2/5
200/200 [==============================] - 5s 27ms/step - loss: 1019.4900 - accuracy: 0.9371
Epoch 3/5
200/200 [==============================] - 5s 27ms/step - loss: 612.2844 - accuracy: 0.9416
Epoch 4/5
200/200 [==============================] - 5s 27ms/step - loss: 858.9774 - accuracy: 0.9397
Epoch 5/5
200/200 [==============================] - 5s 26ms/step - loss: 842.3922 - accuracy: 0.9421
Test accuracy: 95.0%
```
</div>
### Train and evaluate a Gradient Boosted Tree model with embeddings
```python
gbt_model = create_gbt_with_preprocessor(embedding_encoder)
run_experiment(gbt_model, train_data, test_data)
```
<div class="k-default-codeblock">
```
Use /tmp/tmpao5o88p6 as temporary training directory
Starting reading the dataset
199/200 [============================>.] - ETA: 0s
Dataset read in 0:00:06.722677
Training model
Model trained in 0:05:18.350298
Compiling model
200/200 [==============================] - 325s 2s/step
Test accuracy: 95.82%
```
</div>
---
## Concluding remarks
TensorFlow Decision Forests provides powerful models, especially with structured data.
In our experiments, the Gradient Boosted Tree model achieved 95.79% test accuracy.
When using target encoding for the categorical features, the same model achieved 95.81% test accuracy.
When pretraining embeddings to be used as inputs to the Gradient Boosted Tree model,
we achieved 95.82% test accuracy.
Decision Forests can be used with Neural Networks, either by
1) using Neural Networks to learn useful representations of the input data,
and then using Decision Forests for the supervised learning task, or by
2) creating an ensemble of both Decision Forest and Neural Network models
(a minimal sketch of this second option is shown below).
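The snippet below is a minimal, hypothetical sketch of the second option. It assumes a
trained neural network (`nn_model`) and a trained Gradient Boosted Trees model
(`gbt_model`) that accept the same input format, and simply averages their predicted
probabilities. It is illustrative only and not part of the experiments above.
```python
import numpy as np


def ensemble_predict(models, dataset):
    """Average the predicted probabilities of several trained models."""
    all_predictions = [model.predict(dataset) for model in models]
    return np.mean(all_predictions, axis=0)


# Hypothetical usage, assuming both models were trained on the same inputs:
# ensemble_probabilities = ensemble_predict([nn_model, gbt_model], test_dataset)
```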
Note that TensorFlow Decision Forests does not (yet) support hardware accelerators.
All training and inference is done on the CPU.
In addition, Decision Forests require a finite dataset that fits in memory
for their training procedures. However, there are diminishing returns
for increasing the size of the dataset, and Decision Forest algorithms
arguably need fewer examples to converge than large Neural Network models.
| keras-io/examples/structured_data/md/classification_with_tfdf.md/0 | {
"file_path": "keras-io/examples/structured_data/md/classification_with_tfdf.md",
"repo_id": "keras-io",
"token_count": 35171
} | 104 |
"""
Title: Event classification for payment card fraud detection
Author: [achoum](https://github.com/achoum/)
Date created: 2024/02/01
Last modified: 2024/02/01
Description: Detection of fraudulent payment card transactions using Temporian and a feed-forward neural network.
Accelerator: GPU
"""
"""
This notebook depends on Keras 3, Temporian, and a few other libraries. You can
install them as follows:
```shell
pip install temporian keras pandas tf-nightly scikit-learn -U
```
"""
import keras # To train the Machine Learning model
import temporian as tp # To convert transactions into tabular data
import numpy as np
import os
import pandas as pd
import datetime
import math
import tensorflow as tf
from sklearn.metrics import RocCurveDisplay
"""
## Introduction
Payment fraud detection is critical for banks, businesses, and consumers. In
Europe alone, fraudulent transactions were estimated at
[€1.89 billion in 2019](https://www.ecb.europa.eu/pub/pdf/cardfraud/ecb.cardfraudreport202110~cac4c418e8.en.pdf).
Worldwide, approximately
[3.6%](https://www.cybersource.com/content/dam/documents/campaign/fraud-report/global-fraud-report-2022.pdf)
of commerce revenue is lost to fraud. In this notebook, we train and evaluate a
model to detect fraudulent transactions using the synthetic dataset attached to
the book
[Reproducible Machine Learning for Credit Card Fraud Detection](https://fraud-detection-handbook.github.io/fraud-detection-handbook/Foreword.html)
by Le Borgne et al.
Fraudulent transactions often cannot be detected by looking at transactions in
isolation. Instead, fraudulent transactions are detected by looking at patterns
across multiple transactions from the same user, to the same merchant, or with
other types of relationships. To express these relationships in a way that is
understandable by a machine learning model, and to augment features with feature
engineering, we use the
[Temporian](https://temporian.readthedocs.io/en/latest) preprocessing library.
We preprocess a transaction dataset into a tabular dataset and use a
feed-forward neural network to learn the patterns of fraud and make predictions.
## Loading the dataset
The dataset contains payment transactions sampled between April 1, 2018 and
September 30, 2018. The transactions are stored in CSV files, one for each day.
**Note:** Downloading the dataset takes ~1 minute.
"""
start_date = datetime.date(2018, 4, 1)
end_date = datetime.date(2018, 9, 30)
# Load the dataset as a Pandas dataframe.
cache_path = "fraud_detection_cache.csv"
if not os.path.exists(cache_path):
print("Download dataset")
dataframes = []
num_files = (end_date - start_date).days
counter = 0
while start_date <= end_date:
if counter % (num_files // 10) == 0:
print(f"[{100 * (counter+1) // num_files}%]", end="", flush=True)
print(".", end="", flush=True)
url = f"https://github.com/Fraud-Detection-Handbook/simulated-data-raw/raw/6e67dbd0a3bfe0d7ec33abc4bce5f37cd4ff0d6a/data/{start_date}.pkl"
dataframes.append(pd.read_pickle(url))
start_date += datetime.timedelta(days=1)
counter += 1
print("done", flush=True)
transactions_dataframe = pd.concat(dataframes)
transactions_dataframe.to_csv(cache_path, index=False)
else:
print("Load dataset from cache")
transactions_dataframe = pd.read_csv(
cache_path, dtype={"CUSTOMER_ID": bytes, "TERMINAL_ID": bytes}
)
print(f"Found {len(transactions_dataframe)} transactions")
"""
Each transaction is represented by a single row, with the following columns of
interest:
- **TX_DATETIME**: The date and time of the transaction.
- **CUSTOMER_ID**: The unique identifier of the customer.
- **TERMINAL_ID**: The identifier of the terminal where the transaction was
made.
- **TX_AMOUNT**: The amount of the transaction.
- **TX_FRAUD**: Whether the transaction is fraudulent (1) or not (0).
"""
transactions_dataframe = transactions_dataframe[
["TX_DATETIME", "CUSTOMER_ID", "TERMINAL_ID", "TX_AMOUNT", "TX_FRAUD"]
]
transactions_dataframe.head(4)
"""
The dataset is highly imbalanced, with the majority of transactions being
legitimate.
"""
fraudulent_rate = transactions_dataframe["TX_FRAUD"].mean()
print("Rate of fraudulent transactions:", fraudulent_rate)
"""
The
[pandas dataframe](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html)
is converted into a
[Temporian EventSet](https://temporian.readthedocs.io/en/latest/reference/temporian/EventSet/),
which is better suited for the data exploration and feature preprocessing of the
next steps.
"""
transactions_evset = tp.from_pandas(transactions_dataframe, timestamps="TX_DATETIME")
transactions_evset
"""
It is possible to plot the entire dataset, but the resulting plot will be
difficult to read. Instead, we can group the transactions per client.
"""
transactions_evset.add_index("CUSTOMER_ID").plot(indexes="3774")
"""
Note the few fraudulent transactions for this client.
## Preparing the training data
Fraudulent transactions cannot be detected in isolation. Instead, we need to
connect related transactions. For each transaction, we compute the sum and count
of transactions for the same terminal in the last `n` days. Because we don't
know the correct value for `n`, we use multiple values for `n` and compute a
set of features for each of them.
"""
# Group the transactions per terminal
transactions_per_terminal = transactions_evset.add_index("TERMINAL_ID")
# Moving statistics per terminal
tmp_features = []
for n in [7, 14, 28]:
tmp_features.append(
transactions_per_terminal["TX_AMOUNT"]
.moving_sum(tp.duration.days(n))
.rename(f"sum_transactions_{n}_days")
)
tmp_features.append(
transactions_per_terminal.moving_count(tp.duration.days(n)).rename(
f"count_transactions_{n}_days"
)
)
feature_set_1 = tp.glue(*tmp_features)
feature_set_1
"""
Let's look at the features of terminal "3774".
"""
feature_set_1.plot(indexes="3774")
"""
A transaction's fraudulent status is not known at the time of the transaction
(otherwise, there would be no problem). However, the bank knows whether a
transaction is fraudulent one week after it is made. We create a set of
features that indicate the number and ratio of fraudulent transactions in the
last N days.
"""
# Lag the transactions by one week.
lagged_transactions = transactions_per_terminal.lag(tp.duration.weeks(1))
# Moving statistics per terminal
tmp_features = []
for n in [7, 14, 28]:
tmp_features.append(
lagged_transactions["TX_FRAUD"]
.moving_sum(tp.duration.days(n), sampling=transactions_per_terminal)
.rename(f"count_fraud_transactions_{n}_days")
)
tmp_features.append(
lagged_transactions["TX_FRAUD"]
.cast(tp.float32)
.simple_moving_average(tp.duration.days(n), sampling=transactions_per_terminal)
.rename(f"rate_fraud_transactions_{n}_days")
)
feature_set_2 = tp.glue(*tmp_features)
"""
Transaction date and time can be correlated with fraud. While each transaction
has a timestamp, a machine learning model might struggle to consume raw timestamps
directly. Instead, we extract various informative calendar features from the
timestamps, such as hour, day of the week (e.g., Monday, Tuesday), and day of
the month (1-31).
"""
feature_set_3 = tp.glue(
transactions_per_terminal.calendar_hour(),
transactions_per_terminal.calendar_day_of_week(),
)
"""
Finally, we group together all the features and the label.
"""
all_data = tp.glue(
transactions_per_terminal, feature_set_1, feature_set_2, feature_set_3
).drop_index()
print("All the available features:")
all_data.schema.feature_names()
"""
We extract the name of the input features.
"""
input_feature_names = [k for k in all_data.schema.feature_names() if k.islower()]
print("The model's input features:")
input_feature_names
"""
For neural networks to work correctly, numerical inputs must be normalized. A
common approach is to apply z-normalization, which subtracts from each value the mean
and divides it by the standard deviation, both estimated from the training
data. In forecasting, such z-normalization is not recommended as it would lead
to future leakage. Specifically, to classify a transaction at time t, we cannot
rely on data after time t since, at serving time when making a prediction at
time t, no subsequent data is available yet. In short, at time t, we are limited
to using data that precedes or is concurrent with time t.
The solution is therefore to apply z-normalization **over time**, which means
that we normalize each transaction using the mean and standard deviation
computed from the past data **for that transaction**.
Future leakage is pernicious. Luckily, Temporian is here to help: the only
operator that can cause future leakage is `EventSet.leak()`. If you are not
using `EventSet.leak()`, your preprocessing is **guaranteed** not to create
future leakage.
**Note:** For advanced pipelines, you can also check programmatically that a
feature does not depend on an `EventSet.leak()` operation.
"""
# Cast all values (e.g. ints) to floats.
values = all_data[input_feature_names].cast(tp.float32)
# Apply z-normalization over time.
normalized_features = (
values - values.simple_moving_average(math.inf)
) / values.moving_standard_deviation(math.inf)
# Restore the original name of the features.
normalized_features = normalized_features.rename(values.schema.feature_names())
print(normalized_features)
"""
The first transactions will be normalized using poor estimates of the mean and
standard deviation since there are only a few transactions before them. To
mitigate this issue, we remove the first week of data from the training dataset.
Notice that the first values contain NaN. In Temporian, NaN represents missing
values, and all operators handle them accordingly. For instance, when
calculating a moving average, NaN values are not included in the calculation
and do not generate a NaN result.
However, neural networks cannot natively handle NaN values. So, we replace them
with zeros.
"""
normalized_features = normalized_features.fillna(0.0)
"""
Finally, we group together the features and the labels.
"""
normalized_all_data = tp.glue(normalized_features, all_data["TX_FRAUD"])
"""
## Split dataset into a train, validation and test set
To evaluate the quality of our machine learning model, we need training,
validation and test sets. Since the system is dynamic (new fraud patterns are
being created all the time), it is important for the training set to come before
the validation set, and the validation set come before the testing set:
- **Training:** April 8, 2018 to July 31, 2018
- **Validation:** August 1, 2018 to August 31, 2018
- **Testing:** September 1, 2018 to September 30, 2018
For the example to run faster, we will effectively reduce the size of the
training set to:
- **Training:** July 1, 2018 to July 31, 2018
"""
# begin_train = datetime.datetime(2018, 4, 8).timestamp() # Full training dataset
begin_train = datetime.datetime(2018, 7, 1).timestamp() # Reduced training dataset
begin_valid = datetime.datetime(2018, 8, 1).timestamp()
begin_test = datetime.datetime(2018, 9, 1).timestamp()
is_train = (normalized_all_data.timestamps() >= begin_train) & (
normalized_all_data.timestamps() < begin_valid
)
is_valid = (normalized_all_data.timestamps() >= begin_valid) & (
normalized_all_data.timestamps() < begin_test
)
is_test = normalized_all_data.timestamps() >= begin_test
"""
`is_train`, `is_valid` and `is_test` are boolean features over time that indicate
the limits of the three folds. Let's plot them.
"""
tp.plot(
[
is_train.rename("is_train"),
is_valid.rename("is_valid"),
is_test.rename("is_test"),
]
)
"""
We filter the input features and label in each fold.
"""
train_ds_evset = normalized_all_data.filter(is_train)
valid_ds_evset = normalized_all_data.filter(is_valid)
test_ds_evset = normalized_all_data.filter(is_test)
print(f"Training examples: {train_ds_evset.num_events()}")
print(f"Validation examples: {valid_ds_evset.num_events()}")
print(f"Testing examples: {test_ds_evset.num_events()}")
"""
It is important to split the dataset **after** the features have been computed,
because some of the features for the validation and testing examples are computed
from transactions in the training window.
## Create TensorFlow datasets
We convert the datasets from EventSets to TensorFlow Datasets as Keras consumes
them natively.
"""
non_batched_train_ds = tp.to_tensorflow_dataset(train_ds_evset)
non_batched_valid_ds = tp.to_tensorflow_dataset(valid_ds_evset)
non_batched_test_ds = tp.to_tensorflow_dataset(test_ds_evset)
"""
The following processing steps are applied using TensorFlow datasets:
1. The features and labels are separated using `extract_features_and_label` in
the format that Keras expects.
1. The dataset is batched, which means that the examples are grouped into
mini-batches.
1. The training examples are shuffled to improve the quality of mini-batch
training.
As we noted before, the dataset is imbalanced in the direction of legitimate
transactions. While we want to evaluate our model on this original distribution,
neural networks often train poorly on strongly imbalanced datasets. Therefore,
we resample the training dataset to a ratio of 80% legitimate / 20% fraudulent
using `rejection_resample`.
"""
def extract_features_and_label(example):
features = {k: example[k] for k in input_feature_names}
labels = tf.cast(example["TX_FRAUD"], tf.int32)
return features, labels
# Target ratio of fraudulent transactions in the training dataset.
target_rate = 0.2
# Number of examples in a mini-batch.
batch_size = 32
train_ds = (
non_batched_train_ds.shuffle(10000)
.rejection_resample(
class_func=lambda x: tf.cast(x["TX_FRAUD"], tf.int32),
target_dist=[1 - target_rate, target_rate],
initial_dist=[1 - fraudulent_rate, fraudulent_rate],
)
.map(lambda _, x: x) # Remove the label copy added by "rejection_resample".
.batch(batch_size)
.map(extract_features_and_label)
.prefetch(tf.data.AUTOTUNE)
)
# The test and validation datasets do not need resampling or shuffling.
valid_ds = (
non_batched_valid_ds.batch(batch_size)
.map(extract_features_and_label)
.prefetch(tf.data.AUTOTUNE)
)
test_ds = (
non_batched_test_ds.batch(batch_size)
.map(extract_features_and_label)
.prefetch(tf.data.AUTOTUNE)
)
"""
We print the first four examples of the training dataset. This is a simple way
to identify some of the errors that could have been made above.
"""
for features, labels in train_ds.take(1):
print("features")
for feature_name, feature_value in features.items():
print(f"\t{feature_name}: {feature_value[:4]}")
print(f"labels: {labels[:4]}")
"""
## Train the model
The original dataset is transactional, but the processed data is tabular and
only contains normalized numerical values. Therefore, we train a feed-forward
neural network.
"""
inputs = [keras.Input(shape=(1,), name=name) for name in input_feature_names]
x = keras.layers.concatenate(inputs)
x = keras.layers.Dense(32, activation="sigmoid")(x)
x = keras.layers.Dense(16, activation="sigmoid")(x)
x = keras.layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs=inputs, outputs=x)
"""
Our goal is to differentiate between the fraudulent and legitimate transactions,
so we use a binary classification objective. Because the dataset is imbalanced,
accuracy is not an informative metric. Instead, we evaluate the model using the
[area under the curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve)
(AUC).
"""
model.compile(
optimizer=keras.optimizers.Adam(0.01),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.Accuracy(), keras.metrics.AUC()],
)
model.fit(train_ds, validation_data=valid_ds)
"""
We evaluate the model on the test dataset.
"""
model.evaluate(test_ds)
"""
With an AUC of ~83%, our simple fraud detector shows encouraging
results.
Plotting the ROC curve is a good way to understand and select the operating
point of the model, i.e. the threshold applied to the model output to
differentiate between fraudulent and legitimate transactions (a hypothetical
sketch of threshold selection follows the ROC plot below).
Compute the test predictions:
"""
predictions = model.predict(test_ds)
predictions = np.nan_to_num(predictions, nan=0)
"""
Extract the labels from the test set:
"""
labels = np.concatenate([label for _, label in test_ds])
"""
Finally, we plot the ROC curve.
"""
_ = RocCurveDisplay.from_predictions(labels, predictions)
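"""
As an illustration (not part of the original workflow), the ROC data can also be used
to pick an operating threshold directly. The sketch below relies on
`sklearn.metrics.roc_curve` and selects, among the thresholds whose false positive
rate stays under an arbitrary 1% budget, the one with the highest true positive rate.
The 1% budget is an assumption made for this example.
"""
from sklearn.metrics import roc_curve

fpr, tpr, thresholds = roc_curve(labels, predictions.ravel())

# Keep only the operating points whose false positive rate is within the budget.
max_fpr = 0.01  # Hypothetical false positive budget.
candidates = np.where(fpr <= max_fpr)[0]
best = candidates[np.argmax(tpr[candidates])]
print(
    f"Selected threshold: {thresholds[best]:.4f} "
    f"(TPR={tpr[best]:.3f}, FPR={fpr[best]:.3f})"
)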
"""
The Keras model is ready to be used on transactions with an unknown fraud
status, a.k.a. serving. We save the model on disk for future use.
**Note:** The model does not include the data preparation and preprocessing steps
done in Pandas and Temporian. They have to be applied manually to the data fed
into the model. While not demonstrated here, Temporian preprocessing can also be
saved to disk with
[tp.save](https://temporian.readthedocs.io/en/latest/reference/temporian/serialization/save/).
"""
model.save("fraud_detection_model.keras")
"""
The model can be later reloaded with:
"""
loaded_model = keras.saving.load_model("fraud_detection_model.keras")
# Generate predictions with the loaded model on 5 test examples.
loaded_model.predict(test_ds.rebatch(5).take(1))
"""
## Conclusion
We trained a feed-forward neural network to identify fraudulent transactions. To
feed them into the model, the transactions were preprocessed and transformed
into a tabular dataset using
[Temporian](https://temporian.readthedocs.io/en/latest/). Now, a question to the
reader: What could be done to further improve the model's performance?
Here are some ideas:
- Train the model on the entire dataset instead of a single month of data.
- Train the model for more epochs and use early stopping to ensure that the
model is fully trained without overfitting.
- Make the feed-forward network more powerful by increasing the number of layers
while ensuring that the model is regularized.
- Compute additional preprocessing features. For example, in addition to
aggregating transactions by terminal, aggregate transactions by client
(a sketch of this idea follows at the end of this example).
- Use the Keras Tuner to perform hyperparameter tuning on the model. Note that
the parameters of the preprocessing (e.g., the number of days of
aggregations) are also hyperparameters that can be tuned.
"""
| keras-io/examples/timeseries/event_classification_for_payment_card_fraud_detection.py/0 | {
"file_path": "keras-io/examples/timeseries/event_classification_for_payment_card_fraud_detection.py",
"repo_id": "keras-io",
"token_count": 5777
} | 105 |
# Electroencephalogram Signal Classification for action identification
**Author:** [Suvaditya Mukherjee](https://github.com/suvadityamuk)<br>
**Date created:** 2022/11/03<br>
**Last modified:** 2022/11/05<br>
**Description:** Training a Convolutional model to classify EEG signals produced by exposure to certain stimuli.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/timeseries/ipynb/eeg_signal_classification.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/timeseries/eeg_signal_classification.py)
---
## Introduction
The following example explores how we can build a Convolution-based Neural Network to
perform classification on Electroencephalogram signals captured when subjects were
exposed to different stimuli.
We train a model from scratch since such signal-classification models are fairly scarce
in pre-trained format.
The data we use is sourced from the UC Berkeley-Biosense Lab, where it was collected
from 15 subjects at the same time.
Our process is as follows:
- Load the [UC Berkeley-Biosense Synchronized Brainwave Dataset](https://www.kaggle.com/datasets/berkeley-biosense/synchronized-brainwave-dataset)
- Visualize random samples from the data
- Pre-process, collate and scale the data to finally make a `tf.data.Dataset`
- Prepare class weights in order to tackle major imbalances
- Create a Conv1D and Dense-based model to perform classification
- Define callbacks and hyperparameters
- Train the model
- Plot metrics from History and perform evaluation
This example needs the following external dependencies (Gdown, Scikit-learn, Pandas,
Numpy, Matplotlib). You can install them via the following commands.
Gdown is an external package used to download large files from Google Drive. To learn
more, you can refer to its [PyPI page here](https://pypi.org/project/gdown)
---
## Setup and Data Downloads
First, let's install our dependencies:
```python
!pip install gdown -q
!pip install scikit-learn -q
!pip install pandas -q
!pip install numpy -q
!pip install matplotlib -q
```
Next, let's download our dataset.
The gdown package makes it easy to download the data from Google Drive:
```python
!gdown 1V5B7Bt6aJm0UHbR7cRKBEK8jx7lYPVuX
!# gdown will download eeg-data.csv onto the local drive for use. Total size of
!# eeg-data.csv is 105.7 MB
```
```python
import pandas as pd
import matplotlib.pyplot as plt
import json
import numpy as np
import keras
from keras import layers
import tensorflow as tf
from sklearn import preprocessing, model_selection
import random
QUALITY_THRESHOLD = 128
BATCH_SIZE = 64
SHUFFLE_BUFFER_SIZE = BATCH_SIZE * 2
```
<div class="k-default-codeblock">
```
Downloading...
From (uriginal): https://drive.google.com/uc?id=1V5B7Bt6aJm0UHbR7cRKBEK8jx7lYPVuX
From (redirected): https://drive.google.com/uc?id=1V5B7Bt6aJm0UHbR7cRKBEK8jx7lYPVuX&confirm=t&uuid=4d50d1e7-44b5-4984-aa04-cb4e08803cb8
To: /home/fchollet/keras-io/scripts/tmp_3333846/eeg-data.csv
100%|█████████████████████████████████████████| 106M/106M [00:00<00:00, 259MB/s]
```
</div>
---
## Read data from `eeg-data.csv`
We use the Pandas library to read the `eeg-data.csv` file and display the first 5 rows
using the `.head()` command
```python
eeg = pd.read_csv("eeg-data.csv")
```
We remove unlabeled samples from our dataset as they do not contribute to the model. We
also perform a `.drop()` operation on the columns that are not required for training data
preparation
```python
unlabeled_eeg = eeg[eeg["label"] == "unlabeled"]
eeg = eeg.loc[eeg["label"] != "unlabeled"]
eeg = eeg.loc[eeg["label"] != "everyone paired"]
eeg.drop(
[
"indra_time",
"Unnamed: 0",
"browser_latency",
"reading_time",
"attention_esense",
"meditation_esense",
"updatedAt",
"createdAt",
],
axis=1,
inplace=True,
)
eeg.reset_index(drop=True, inplace=True)
eeg.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
<div class="k-default-codeblock">
```
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
```
</div>
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>id</th>
<th>eeg_power</th>
<th>raw_values</th>
<th>signal_quality</th>
<th>label</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>7</td>
<td>[56887.0, 45471.0, 20074.0, 5359.0, 22594.0, 7...</td>
<td>[99.0, 96.0, 91.0, 89.0, 91.0, 89.0, 87.0, 93....</td>
<td>0</td>
<td>blinkInstruction</td>
</tr>
<tr>
<th>1</th>
<td>5</td>
<td>[11626.0, 60301.0, 5805.0, 15729.0, 4448.0, 33...</td>
<td>[23.0, 40.0, 64.0, 89.0, 86.0, 33.0, -14.0, -1...</td>
<td>0</td>
<td>blinkInstruction</td>
</tr>
<tr>
<th>2</th>
<td>1</td>
<td>[15777.0, 33461.0, 21385.0, 44193.0, 11741.0, ...</td>
<td>[41.0, 26.0, 16.0, 20.0, 34.0, 51.0, 56.0, 55....</td>
<td>0</td>
<td>blinkInstruction</td>
</tr>
<tr>
<th>3</th>
<td>13</td>
<td>[311822.0, 44739.0, 19000.0, 19100.0, 2650.0, ...</td>
<td>[208.0, 198.0, 122.0, 84.0, 161.0, 249.0, 216....</td>
<td>0</td>
<td>blinkInstruction</td>
</tr>
<tr>
<th>4</th>
<td>4</td>
<td>[687393.0, 10289.0, 2942.0, 9874.0, 1059.0, 29...</td>
<td>[129.0, 133.0, 114.0, 105.0, 101.0, 109.0, 99....</td>
<td>0</td>
<td>blinkInstruction</td>
</tr>
</tbody>
</table>
</div>
In the data, each recorded sample is given a signal-quality score based on how
well-calibrated the sensor was (0 being best, 200 being worst). We keep only the samples
whose score is below an arbitrary cutoff limit of 128.
```python
def convert_string_data_to_values(value_string):
str_list = json.loads(value_string)
return str_list
eeg["raw_values"] = eeg["raw_values"].apply(convert_string_data_to_values)
eeg = eeg.loc[eeg["signal_quality"] < QUALITY_THRESHOLD]
eeg.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
<div class="k-default-codeblock">
```
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
```
</div>
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>id</th>
<th>eeg_power</th>
<th>raw_values</th>
<th>signal_quality</th>
<th>label</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>7</td>
<td>[56887.0, 45471.0, 20074.0, 5359.0, 22594.0, 7...</td>
<td>[99.0, 96.0, 91.0, 89.0, 91.0, 89.0, 87.0, 93....</td>
<td>0</td>
<td>blinkInstruction</td>
</tr>
<tr>
<th>1</th>
<td>5</td>
<td>[11626.0, 60301.0, 5805.0, 15729.0, 4448.0, 33...</td>
<td>[23.0, 40.0, 64.0, 89.0, 86.0, 33.0, -14.0, -1...</td>
<td>0</td>
<td>blinkInstruction</td>
</tr>
<tr>
<th>2</th>
<td>1</td>
<td>[15777.0, 33461.0, 21385.0, 44193.0, 11741.0, ...</td>
<td>[41.0, 26.0, 16.0, 20.0, 34.0, 51.0, 56.0, 55....</td>
<td>0</td>
<td>blinkInstruction</td>
</tr>
<tr>
<th>3</th>
<td>13</td>
<td>[311822.0, 44739.0, 19000.0, 19100.0, 2650.0, ...</td>
<td>[208.0, 198.0, 122.0, 84.0, 161.0, 249.0, 216....</td>
<td>0</td>
<td>blinkInstruction</td>
</tr>
<tr>
<th>4</th>
<td>4</td>
<td>[687393.0, 10289.0, 2942.0, 9874.0, 1059.0, 29...</td>
<td>[129.0, 133.0, 114.0, 105.0, 101.0, 109.0, 99....</td>
<td>0</td>
<td>blinkInstruction</td>
</tr>
</tbody>
</table>
</div>
---
## Visualize one random sample from the data
We visualize one sample from the data to understand what the stimulus-induced signal
looks like.
```python
def view_eeg_plot(idx):
data = eeg.loc[idx, "raw_values"]
plt.plot(data)
plt.title(f"Sample random plot")
plt.show()
view_eeg_plot(7)
```

---
## Pre-process and collate data
There are a total of 67 different labels present in the data, many of which are numbered
sub-labels. We collate them under a single parent label as per their numbering and replace them
in the data itself. Following this process, we perform simple label encoding to get them
in an integer format.
```python
print("Before replacing labels")
print(eeg["label"].unique(), "\n")
print(len(eeg["label"].unique()), "\n")
eeg.replace(
{
"label": {
"blink1": "blink",
"blink2": "blink",
"blink3": "blink",
"blink4": "blink",
"blink5": "blink",
"math1": "math",
"math2": "math",
"math3": "math",
"math4": "math",
"math5": "math",
"math6": "math",
"math7": "math",
"math8": "math",
"math9": "math",
"math10": "math",
"math11": "math",
"math12": "math",
"thinkOfItems-ver1": "thinkOfItems",
"thinkOfItems-ver2": "thinkOfItems",
"video-ver1": "video",
"video-ver2": "video",
"thinkOfItemsInstruction-ver1": "thinkOfItemsInstruction",
"thinkOfItemsInstruction-ver2": "thinkOfItemsInstruction",
"colorRound1-1": "colorRound1",
"colorRound1-2": "colorRound1",
"colorRound1-3": "colorRound1",
"colorRound1-4": "colorRound1",
"colorRound1-5": "colorRound1",
"colorRound1-6": "colorRound1",
"colorRound2-1": "colorRound2",
"colorRound2-2": "colorRound2",
"colorRound2-3": "colorRound2",
"colorRound2-4": "colorRound2",
"colorRound2-5": "colorRound2",
"colorRound2-6": "colorRound2",
"colorRound3-1": "colorRound3",
"colorRound3-2": "colorRound3",
"colorRound3-3": "colorRound3",
"colorRound3-4": "colorRound3",
"colorRound3-5": "colorRound3",
"colorRound3-6": "colorRound3",
"colorRound4-1": "colorRound4",
"colorRound4-2": "colorRound4",
"colorRound4-3": "colorRound4",
"colorRound4-4": "colorRound4",
"colorRound4-5": "colorRound4",
"colorRound4-6": "colorRound4",
"colorRound5-1": "colorRound5",
"colorRound5-2": "colorRound5",
"colorRound5-3": "colorRound5",
"colorRound5-4": "colorRound5",
"colorRound5-5": "colorRound5",
"colorRound5-6": "colorRound5",
"colorInstruction1": "colorInstruction",
"colorInstruction2": "colorInstruction",
"readyRound1": "readyRound",
"readyRound2": "readyRound",
"readyRound3": "readyRound",
"readyRound4": "readyRound",
"readyRound5": "readyRound",
"colorRound1": "colorRound",
"colorRound2": "colorRound",
"colorRound3": "colorRound",
"colorRound4": "colorRound",
"colorRound5": "colorRound",
}
},
inplace=True,
)
print("After replacing labels")
print(eeg["label"].unique())
print(len(eeg["label"].unique()))
le = preprocessing.LabelEncoder() # Generates a look-up table
le.fit(eeg["label"])
eeg["label"] = le.transform(eeg["label"])
```
<div class="k-default-codeblock">
```
Before replacing labels
['blinkInstruction' 'blink1' 'blink2' 'blink3' 'blink4' 'blink5'
'relaxInstruction' 'relax' 'mathInstruction' 'math1' 'math2' 'math3'
'math4' 'math5' 'math6' 'math7' 'math8' 'math9' 'math10' 'math11'
'math12' 'musicInstruction' 'music' 'videoInstruction' 'video-ver1'
'thinkOfItemsInstruction-ver1' 'thinkOfItems-ver1' 'colorInstruction1'
'colorInstruction2' 'readyRound1' 'colorRound1-1' 'colorRound1-2'
'colorRound1-3' 'colorRound1-4' 'colorRound1-5' 'colorRound1-6'
'readyRound2' 'colorRound2-1' 'colorRound2-2' 'colorRound2-3'
'colorRound2-4' 'colorRound2-5' 'colorRound2-6' 'readyRound3'
'colorRound3-1' 'colorRound3-2' 'colorRound3-3' 'colorRound3-4'
'colorRound3-5' 'colorRound3-6' 'readyRound4' 'colorRound4-1'
'colorRound4-2' 'colorRound4-3' 'colorRound4-4' 'colorRound4-5'
'colorRound4-6' 'readyRound5' 'colorRound5-1' 'colorRound5-2'
'colorRound5-3' 'colorRound5-4' 'colorRound5-5' 'colorRound5-6'
'video-ver2' 'thinkOfItemsInstruction-ver2' 'thinkOfItems-ver2']
```
</div>
<div class="k-default-codeblock">
```
67
```
</div>
<div class="k-default-codeblock">
```
After replacing labels
['blinkInstruction' 'blink' 'relaxInstruction' 'relax' 'mathInstruction'
'math' 'musicInstruction' 'music' 'videoInstruction' 'video'
'thinkOfItemsInstruction' 'thinkOfItems' 'colorInstruction' 'readyRound'
'colorRound1' 'colorRound2' 'colorRound3' 'colorRound4' 'colorRound5']
19
```
</div>
We extract the number of unique classes present in the data
```python
num_classes = len(eeg["label"].unique())
print(num_classes)
```
<div class="k-default-codeblock">
```
19
```
</div>
We now visualize the number of samples present in each class using a Bar plot.
```python
plt.bar(range(num_classes), eeg["label"].value_counts())
plt.title("Number of samples per class")
plt.show()
```

---
## Scale and split data
We perform simple min-max scaling to bring the value range between 0 and 1. We do not
use standard scaling as the data does not follow a Gaussian distribution.
```python
scaler = preprocessing.MinMaxScaler()
series_list = [
scaler.fit_transform(np.asarray(i).reshape(-1, 1)) for i in eeg["raw_values"]
]
labels_list = [i for i in eeg["label"]]
```
We now create a Train-test split with a 15% holdout set. Following this, we reshape the
data to create a sequence of length 512. We also convert the labels from their current
label-encoded form to a one-hot encoding to enable use of several different
`keras.metrics` functions.
```python
x_train, x_test, y_train, y_test = model_selection.train_test_split(
series_list, labels_list, test_size=0.15, random_state=42, shuffle=True
)
print(
f"Length of x_train : {len(x_train)}\nLength of x_test : {len(x_test)}\nLength of y_train : {len(y_train)}\nLength of y_test : {len(y_test)}"
)
x_train = np.asarray(x_train).astype(np.float32).reshape(-1, 512, 1)
y_train = np.asarray(y_train).astype(np.float32).reshape(-1, 1)
y_train = keras.utils.to_categorical(y_train)
x_test = np.asarray(x_test).astype(np.float32).reshape(-1, 512, 1)
y_test = np.asarray(y_test).astype(np.float32).reshape(-1, 1)
y_test = keras.utils.to_categorical(y_test)
```
<div class="k-default-codeblock">
```
Length of x_train : 8460
Length of x_test : 1494
Length of y_train : 8460
Length of y_test : 1494
```
</div>
---
## Prepare `tf.data.Dataset`
We now create a `tf.data.Dataset` from this data to prepare it for training. We also
shuffle and batch the data for use later.
```python
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)
```
---
## Make Class Weights using Naive method
As we can see from the plot of number of samples per class, the dataset is imbalanced.
Hence, we **calculate weights for each class** to make sure that the model is trained in
a fair manner without preference to any specific class due to a greater number of samples.
We use a naive method to calculate these weights, finding an **inverse proportion** of
each class and using that as the weight.
```python
vals_dict = {}
for i in eeg["label"]:
if i in vals_dict.keys():
vals_dict[i] += 1
else:
vals_dict[i] = 1
total = sum(vals_dict.values())
# Formula used - Naive method where
# weight = 1 - (no. of samples present / total no. of samples)
# So more the samples, lower the weight
weight_dict = {k: (1 - (v / total)) for k, v in vals_dict.items()}
print(weight_dict)
```
<div class="k-default-codeblock">
```
{1: 0.9872413100261201, 0: 0.975989551938919, 14: 0.9841269841269842, 13: 0.9061683745228049, 9: 0.9838255977496484, 8: 0.9059674502712477, 11: 0.9847297568816556, 10: 0.9063692987743621, 18: 0.9838255977496484, 17: 0.9057665260196905, 16: 0.9373116335141651, 15: 0.9065702230259193, 2: 0.9211372312638135, 12: 0.9525818766325096, 3: 0.9245529435402853, 4: 0.943841671689773, 5: 0.9641350210970464, 6: 0.981514968856741, 7: 0.9443439823186659}
```
</div>
---
## Define simple function to plot all the metrics present in a `keras.callbacks.History`
object
```python
def plot_history_metrics(history: keras.callbacks.History):
total_plots = len(history.history)
cols = total_plots // 2
rows = total_plots // cols
if total_plots % cols != 0:
rows += 1
pos = range(1, total_plots + 1)
plt.figure(figsize=(15, 10))
for i, (key, value) in enumerate(history.history.items()):
plt.subplot(rows, cols, pos[i])
plt.plot(range(len(value)), value)
plt.title(str(key))
plt.show()
```
---
## Define function to generate Convolutional model
```python
def create_model():
input_layer = keras.Input(shape=(512, 1))
x = layers.Conv1D(
filters=32, kernel_size=3, strides=2, activation="relu", padding="same"
)(input_layer)
x = layers.BatchNormalization()(x)
x = layers.Conv1D(
filters=64, kernel_size=3, strides=2, activation="relu", padding="same"
)(x)
x = layers.BatchNormalization()(x)
x = layers.Conv1D(
filters=128, kernel_size=5, strides=2, activation="relu", padding="same"
)(x)
x = layers.BatchNormalization()(x)
x = layers.Conv1D(
filters=256, kernel_size=5, strides=2, activation="relu", padding="same"
)(x)
x = layers.BatchNormalization()(x)
x = layers.Conv1D(
filters=512, kernel_size=7, strides=2, activation="relu", padding="same"
)(x)
x = layers.BatchNormalization()(x)
x = layers.Conv1D(
filters=1024,
kernel_size=7,
strides=2,
activation="relu",
padding="same",
)(x)
x = layers.BatchNormalization()(x)
x = layers.Dropout(0.2)(x)
x = layers.Flatten()(x)
x = layers.Dense(4096, activation="relu")(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(
2048, activation="relu", kernel_regularizer=keras.regularizers.L2()
)(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(
1024, activation="relu", kernel_regularizer=keras.regularizers.L2()
)(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(
128, activation="relu", kernel_regularizer=keras.regularizers.L2()
)(x)
output_layer = layers.Dense(num_classes, activation="softmax")(x)
return keras.Model(inputs=input_layer, outputs=output_layer)
```
---
## Get Model summary
```python
conv_model = create_model()
conv_model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv1d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv1d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">6,208</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv1d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">41,088</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">512</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv1d_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">164,096</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization_3 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,024</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv1d_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">918,016</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization_4 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,048</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv1d_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">1024</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">3,671,040</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization_5 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">1024</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,096</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">1024</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ flatten (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8192</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4096</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">33,558,528</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4096</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">8,390,656</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1024</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,098,176</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1024</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">131,200</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">19</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,451</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">48,989,651</span> (186.88 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">48,985,619</span> (186.87 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">4,032</span> (15.75 KB)
</pre>
---
## Define callbacks, optimizer, loss and metrics
We set the number of epochs to 30 after extensive experimentation; an early-stopping
analysis also confirmed this as the optimal number (a hypothetical sketch of such a setup
follows the code block below).
We define a `ModelCheckpoint` callback to make sure that we only keep the best model
weights.
We also define a `ReduceLROnPlateau` callback, since there were several cases found during
experimentation where the loss stagnated after a certain point. On the other hand, a
direct LRScheduler was found to be too aggressive in its decay.
```python
epochs = 30
callbacks = [
keras.callbacks.ModelCheckpoint(
"best_model.keras", save_best_only=True, monitor="loss"
),
keras.callbacks.ReduceLROnPlateau(
monitor="val_top_k_categorical_accuracy",
factor=0.2,
patience=2,
min_lr=0.000001,
),
]
optimizer = keras.optimizers.Adam(amsgrad=True, learning_rate=0.001)
loss = keras.losses.CategoricalCrossentropy()
```
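The early-stopping analysis mentioned above is not reproduced here. As a rough,
hypothetical sketch of how it could be set up with Keras, an `EarlyStopping` callback
could be appended to the `callbacks` list; the monitored metric and patience values
below are assumptions, not the settings used for the reported run.
```python
# Hypothetical early-stopping setup (not used for the reported run): stop training
# once the validation loss has not improved for 5 consecutive epochs and restore the
# best weights seen so far.
early_stopping = keras.callbacks.EarlyStopping(
    monitor="val_loss",
    patience=5,
    restore_best_weights=True,
)
# callbacks.append(early_stopping)  # Uncomment to include it in `model.fit()`.
```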
---
## Compile model and call `model.fit()`
We use the `Adam` optimizer since it is a common, robust default for
preliminary training, and it also performed best in our experiments.
We use `CategoricalCrossentropy` as the loss since our labels are in a one-hot-encoded form.
We define the `TopKCategoricalAccuracy(k=3)`, `AUC`, `Precision` and `Recall` metrics to
further aid in understanding the model.
```python
conv_model.compile(
optimizer=optimizer,
loss=loss,
metrics=[
keras.metrics.TopKCategoricalAccuracy(k=3),
keras.metrics.AUC(),
keras.metrics.Precision(),
keras.metrics.Recall(),
],
)
conv_model_history = conv_model.fit(
train_dataset,
epochs=epochs,
callbacks=callbacks,
validation_data=test_dataset,
class_weight=weight_dict,
)
```
<div class="k-default-codeblock">
```
Epoch 1/30
8/133 ━[37m━━━━━━━━━━━━━━━━━━━ 1s 16ms/step - auc: 0.5550 - loss: 45.5990 - precision: 0.0183 - recall: 0.0049 - top_k_categorical_accuracy: 0.2154
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1699421521.552287 4412 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
W0000 00:00:1699421521.578522 4412 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update
133/133 ━━━━━━━━━━━━━━━━━━━━ 0s 134ms/step - auc: 0.6119 - loss: 24.8582 - precision: 0.0465 - recall: 0.0022 - top_k_categorical_accuracy: 0.2479
W0000 00:00:1699421539.207966 4409 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update
W0000 00:00:1699421541.374400 4408 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update
W0000 00:00:1699421542.991471 4406 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update
133/133 ━━━━━━━━━━━━━━━━━━━━ 44s 180ms/step - auc: 0.6122 - loss: 24.7734 - precision: 0.0466 - recall: 0.0022 - top_k_categorical_accuracy: 0.2481 - val_auc: 0.6470 - val_loss: 4.1950 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00 - val_top_k_categorical_accuracy: 0.2610 - learning_rate: 0.0010
Epoch 2/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.6958 - loss: 3.5651 - precision: 0.0000e+00 - recall: 0.0000e+00 - top_k_categorical_accuracy: 0.3162 - val_auc: 0.6364 - val_loss: 3.3169 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00 - val_top_k_categorical_accuracy: 0.2436 - learning_rate: 0.0010
Epoch 3/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.7068 - loss: 2.8805 - precision: 0.1910 - recall: 1.2846e-04 - top_k_categorical_accuracy: 0.3220 - val_auc: 0.6313 - val_loss: 3.0662 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00 - val_top_k_categorical_accuracy: 0.2503 - learning_rate: 0.0010
Epoch 4/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.7370 - loss: 2.6265 - precision: 0.0719 - recall: 2.8215e-04 - top_k_categorical_accuracy: 0.3572 - val_auc: 0.5952 - val_loss: 3.1744 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00 - val_top_k_categorical_accuracy: 0.2282 - learning_rate: 2.0000e-04
Epoch 5/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 9s 65ms/step - auc: 0.7703 - loss: 2.4886 - precision: 0.3738 - recall: 0.0022 - top_k_categorical_accuracy: 0.4029 - val_auc: 0.6320 - val_loss: 3.3036 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00 - val_top_k_categorical_accuracy: 0.2564 - learning_rate: 2.0000e-04
Epoch 6/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 9s 66ms/step - auc: 0.8187 - loss: 2.3009 - precision: 0.6264 - recall: 0.0082 - top_k_categorical_accuracy: 0.4852 - val_auc: 0.6743 - val_loss: 3.4905 - val_precision: 0.1957 - val_recall: 0.0060 - val_top_k_categorical_accuracy: 0.3179 - learning_rate: 4.0000e-05
Epoch 7/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.8577 - loss: 2.1272 - precision: 0.6079 - recall: 0.0307 - top_k_categorical_accuracy: 0.5553 - val_auc: 0.6674 - val_loss: 3.8436 - val_precision: 0.2184 - val_recall: 0.0127 - val_top_k_categorical_accuracy: 0.3286 - learning_rate: 4.0000e-05
Epoch 8/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.8875 - loss: 1.9671 - precision: 0.6614 - recall: 0.0580 - top_k_categorical_accuracy: 0.6400 - val_auc: 0.6577 - val_loss: 4.2607 - val_precision: 0.2212 - val_recall: 0.0167 - val_top_k_categorical_accuracy: 0.3186 - learning_rate: 4.0000e-05
Epoch 9/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9143 - loss: 1.7926 - precision: 0.6770 - recall: 0.0992 - top_k_categorical_accuracy: 0.7189 - val_auc: 0.6465 - val_loss: 4.8088 - val_precision: 0.1780 - val_recall: 0.0228 - val_top_k_categorical_accuracy: 0.3112 - learning_rate: 4.0000e-05
Epoch 10/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9347 - loss: 1.6323 - precision: 0.6741 - recall: 0.1508 - top_k_categorical_accuracy: 0.7832 - val_auc: 0.6483 - val_loss: 4.8556 - val_precision: 0.2424 - val_recall: 0.0268 - val_top_k_categorical_accuracy: 0.3072 - learning_rate: 8.0000e-06
Epoch 11/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 9s 64ms/step - auc: 0.9442 - loss: 1.5469 - precision: 0.6985 - recall: 0.1855 - top_k_categorical_accuracy: 0.8095 - val_auc: 0.6443 - val_loss: 5.0003 - val_precision: 0.2216 - val_recall: 0.0288 - val_top_k_categorical_accuracy: 0.3052 - learning_rate: 8.0000e-06
Epoch 12/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 9s 64ms/step - auc: 0.9490 - loss: 1.4935 - precision: 0.7196 - recall: 0.2063 - top_k_categorical_accuracy: 0.8293 - val_auc: 0.6411 - val_loss: 5.0008 - val_precision: 0.2383 - val_recall: 0.0341 - val_top_k_categorical_accuracy: 0.3112 - learning_rate: 1.6000e-06
Epoch 13/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 9s 65ms/step - auc: 0.9514 - loss: 1.4739 - precision: 0.7071 - recall: 0.2147 - top_k_categorical_accuracy: 0.8371 - val_auc: 0.6411 - val_loss: 5.0279 - val_precision: 0.2356 - val_recall: 0.0355 - val_top_k_categorical_accuracy: 0.3126 - learning_rate: 1.6000e-06
Epoch 14/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 2s 14ms/step - auc: 0.9512 - loss: 1.4739 - precision: 0.7102 - recall: 0.2141 - top_k_categorical_accuracy: 0.8349 - val_auc: 0.6407 - val_loss: 5.0457 - val_precision: 0.2340 - val_recall: 0.0368 - val_top_k_categorical_accuracy: 0.3099 - learning_rate: 1.0000e-06
Epoch 15/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 9s 64ms/step - auc: 0.9533 - loss: 1.4524 - precision: 0.7206 - recall: 0.2240 - top_k_categorical_accuracy: 0.8421 - val_auc: 0.6400 - val_loss: 5.0557 - val_precision: 0.2292 - val_recall: 0.0368 - val_top_k_categorical_accuracy: 0.3092 - learning_rate: 1.0000e-06
Epoch 16/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9536 - loss: 1.4489 - precision: 0.7201 - recall: 0.2218 - top_k_categorical_accuracy: 0.8367 - val_auc: 0.6401 - val_loss: 5.0850 - val_precision: 0.2336 - val_recall: 0.0382 - val_top_k_categorical_accuracy: 0.3072 - learning_rate: 1.0000e-06
Epoch 17/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9542 - loss: 1.4429 - precision: 0.7207 - recall: 0.2353 - top_k_categorical_accuracy: 0.8404 - val_auc: 0.6397 - val_loss: 5.1047 - val_precision: 0.2249 - val_recall: 0.0375 - val_top_k_categorical_accuracy: 0.3086 - learning_rate: 1.0000e-06
Epoch 18/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9547 - loss: 1.4353 - precision: 0.7195 - recall: 0.2323 - top_k_categorical_accuracy: 0.8455 - val_auc: 0.6389 - val_loss: 5.1215 - val_precision: 0.2305 - val_recall: 0.0395 - val_top_k_categorical_accuracy: 0.3072 - learning_rate: 1.0000e-06
Epoch 19/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9554 - loss: 1.4271 - precision: 0.7254 - recall: 0.2326 - top_k_categorical_accuracy: 0.8492 - val_auc: 0.6386 - val_loss: 5.1395 - val_precision: 0.2269 - val_recall: 0.0395 - val_top_k_categorical_accuracy: 0.3072 - learning_rate: 1.0000e-06
Epoch 20/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9559 - loss: 1.4221 - precision: 0.7248 - recall: 0.2471 - top_k_categorical_accuracy: 0.8439 - val_auc: 0.6385 - val_loss: 5.1655 - val_precision: 0.2264 - val_recall: 0.0402 - val_top_k_categorical_accuracy: 0.3052 - learning_rate: 1.0000e-06
Epoch 21/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 64ms/step - auc: 0.9565 - loss: 1.4170 - precision: 0.7169 - recall: 0.2421 - top_k_categorical_accuracy: 0.8543 - val_auc: 0.6385 - val_loss: 5.1851 - val_precision: 0.2271 - val_recall: 0.0415 - val_top_k_categorical_accuracy: 0.3072 - learning_rate: 1.0000e-06
Epoch 22/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9577 - loss: 1.4029 - precision: 0.7305 - recall: 0.2518 - top_k_categorical_accuracy: 0.8536 - val_auc: 0.6384 - val_loss: 5.2043 - val_precision: 0.2279 - val_recall: 0.0415 - val_top_k_categorical_accuracy: 0.3059 - learning_rate: 1.0000e-06
Epoch 23/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9574 - loss: 1.4048 - precision: 0.7285 - recall: 0.2575 - top_k_categorical_accuracy: 0.8527 - val_auc: 0.6382 - val_loss: 5.2247 - val_precision: 0.2308 - val_recall: 0.0442 - val_top_k_categorical_accuracy: 0.3106 - learning_rate: 1.0000e-06
Epoch 24/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9579 - loss: 1.3998 - precision: 0.7426 - recall: 0.2588 - top_k_categorical_accuracy: 0.8503 - val_auc: 0.6386 - val_loss: 5.2479 - val_precision: 0.2308 - val_recall: 0.0442 - val_top_k_categorical_accuracy: 0.3092 - learning_rate: 1.0000e-06
Epoch 25/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9585 - loss: 1.3918 - precision: 0.7348 - recall: 0.2609 - top_k_categorical_accuracy: 0.8607 - val_auc: 0.6378 - val_loss: 5.2648 - val_precision: 0.2287 - val_recall: 0.0448 - val_top_k_categorical_accuracy: 0.3106 - learning_rate: 1.0000e-06
Epoch 26/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9587 - loss: 1.3881 - precision: 0.7425 - recall: 0.2669 - top_k_categorical_accuracy: 0.8544 - val_auc: 0.6380 - val_loss: 5.2877 - val_precision: 0.2226 - val_recall: 0.0448 - val_top_k_categorical_accuracy: 0.3099 - learning_rate: 1.0000e-06
Epoch 27/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9590 - loss: 1.3834 - precision: 0.7469 - recall: 0.2665 - top_k_categorical_accuracy: 0.8599 - val_auc: 0.6379 - val_loss: 5.3021 - val_precision: 0.2252 - val_recall: 0.0455 - val_top_k_categorical_accuracy: 0.3072 - learning_rate: 1.0000e-06
Epoch 28/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 64ms/step - auc: 0.9597 - loss: 1.3763 - precision: 0.7600 - recall: 0.2701 - top_k_categorical_accuracy: 0.8628 - val_auc: 0.6380 - val_loss: 5.3241 - val_precision: 0.2244 - val_recall: 0.0469 - val_top_k_categorical_accuracy: 0.3119 - learning_rate: 1.0000e-06
Epoch 29/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9601 - loss: 1.3692 - precision: 0.7549 - recall: 0.2761 - top_k_categorical_accuracy: 0.8634 - val_auc: 0.6372 - val_loss: 5.3494 - val_precision: 0.2229 - val_recall: 0.0469 - val_top_k_categorical_accuracy: 0.3119 - learning_rate: 1.0000e-06
Epoch 30/30
133/133 ━━━━━━━━━━━━━━━━━━━━ 8s 63ms/step - auc: 0.9604 - loss: 1.3694 - precision: 0.7447 - recall: 0.2723 - top_k_categorical_accuracy: 0.8648 - val_auc: 0.6372 - val_loss: 5.3667 - val_precision: 0.2226 - val_recall: 0.0475 - val_top_k_categorical_accuracy: 0.3119 - learning_rate: 1.0000e-06
```
</div>
---
## Visualize model metrics during training
We use the function defined above to see model metrics during training.
```python
plot_history_metrics(conv_model_history)
```

---
## Evaluate model on test data
```python
loss, accuracy, auc, precision, recall = conv_model.evaluate(test_dataset)
print(f"Loss : {loss}")
print(f"Top 3 Categorical Accuracy : {accuracy}")
print(f"Area under the Curve (ROC) : {auc}")
print(f"Precision : {precision}")
print(f"Recall : {recall}")
def view_evaluated_eeg_plots(model):
start_index = random.randint(10, len(eeg))
end_index = start_index + 11
data = eeg.loc[start_index:end_index, "raw_values"]
data_array = [scaler.fit_transform(np.asarray(i).reshape(-1, 1)) for i in data]
data_array = [np.asarray(data_array).astype(np.float32).reshape(-1, 512, 1)]
original_labels = eeg.loc[start_index:end_index, "label"]
predicted_labels = np.argmax(model.predict(data_array, verbose=0), axis=1)
original_labels = [
le.inverse_transform(np.array(label).reshape(-1))[0]
for label in original_labels
]
predicted_labels = [
le.inverse_transform(np.array(label).reshape(-1))[0]
for label in predicted_labels
]
total_plots = 12
cols = total_plots // 3
rows = total_plots // cols
if total_plots % cols != 0:
rows += 1
pos = range(1, total_plots + 1)
fig = plt.figure(figsize=(20, 10))
for i, (plot_data, og_label, pred_label) in enumerate(
zip(data, original_labels, predicted_labels)
):
plt.subplot(rows, cols, pos[i])
plt.plot(plot_data)
plt.title(f"Actual Label : {og_label}\nPredicted Label : {pred_label}")
fig.subplots_adjust(hspace=0.5)
plt.show()
view_evaluated_eeg_plots(conv_model)
```
<div class="k-default-codeblock">
```
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - auc: 0.6438 - loss: 5.3150 - precision: 0.2589 - recall: 0.0565 - top_k_categorical_accuracy: 0.3281
Loss : 5.366718769073486
Top 3 Categorical Accuracy : 0.6372398138046265
Area under the Curve (ROC) : 0.222570538520813
Precision : 0.04752342775464058
Recall : 0.311914324760437
W0000 00:00:1699421785.101645 4408 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update
```
</div>

| keras-io/examples/timeseries/md/eeg_signal_classification.md/0 | {
"file_path": "keras-io/examples/timeseries/md/eeg_signal_classification.md",
"repo_id": "keras-io",
"token_count": 20720
} | 106 |
"""
Title: Barlow Twins for Contrastive SSL
Author: [Abhiraam Eranti](https://github.com/dewball345)
Date created: 11/4/21
Last modified: 12/20/21
Description: A keras implementation of Barlow Twins (contrastive SSL with redundancy reduction).
Accelerator: GPU
"""
"""
## Introduction
"""
"""
Self-supervised learning (SSL) is a relatively novel technique in which a model
learns from unlabeled data, and is often used when the data is corrupted or
if there is very little of it. A practical use for SSL is to create
intermediate embeddings that are learned from the data. These embeddings are
based on the dataset itself, with similar images having similar embeddings, and
vice versa. They are then attached to the rest of the model, which uses them as
features to learn and make predictions effectively.
These embeddings, ideally, should contain as much information and insight about
the data as possible, so that the model can make better predictions. However,
a common problem that arises is that the model creates embeddings that are
redundant. For example, if two images are similar, the model may create
embeddings that are just a string of 1's, or some other value that repeats the
same bit of information. This is no better than a one-hot encoding, or than having a
single bit as the model's representation; it defeats the purpose of the embeddings,
which should capture as much information about the dataset as possible. Other
approaches address this problem by carefully configuring the model so that its
representations avoid redundancy.
Barlow Twins is a new approach to this problem; while other solutions mainly
tackle the first goal of invariance (similar images have similar embeddings),
the Barlow Twins method also prioritizes the goal of reducing redundancy.
It also has the advantage of being much simpler than other methods, and its
model architecture is symmetric, meaning that both twins in the model do the
same thing. It is also near state-of-the-art on ImageNet, even exceeding methods
like SimCLR.
One disadvantage of Barlow Twins is that it is heavily dependent on
augmentation, suffering major performance decreases in accuracy without them.
TL;DR: Barlow Twins creates representations that are:
* Invariant.
* Not redundant, carrying as much information about the dataset as possible.
Also, it is simpler than other methods.
This notebook can train a Barlow Twins model and reach up to
64% validation accuracy on the CIFAR-10 dataset.
"""
"""

"""
"""
### High-Level Theory
"""
"""
The model takes two versions of the same image (with different augmentations) as
input. It then produces a prediction for each of them, creating two representations.
They are then used to make a cross-correlation matrix.
Cross-correlation matrix:
```
(pred_1.T @ pred_2) / batch_size
```
The cross-correlation matrix measures the correlation between the output
neurons in the two representations made by the model predictions of the two
augmented versions of the data. Ideally, the cross-correlation matrix should look
like the identity matrix when the two inputs come from the same image.
When this happens, it means that the representations:
1. Are invariant. The diagonal shows the correlation between each
representation's neurons and its corresponding augmented one. Because the two
versions come from the same image, the diagonal of the matrix should show that
there is a strong correlation between them. If the two inputs came from different
images, the diagonal correlations would be weak.
2. Do not show signs of redundancy. If a neuron is correlated with a neuron at a
non-diagonal position, it carries information that another neuron already provides,
which means the representation is redundant.
Here is a good way of understanding this in pseudocode (adapted from the original
paper):
```
c[i][i] = 1
c[i][j] = 0
where:
c is the cross-correlation matrix
i is the index of one representation's neuron
j is the index of the second representation's neuron
```
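As a quick sanity check, here is a minimal NumPy sketch (our own illustration, not
part of the original method) of the cross-correlation matrix for two nearly
identical, feature-normalized batches of embeddings:
```
import numpy as np

batch_size, n_features = 256, 8
rng = np.random.default_rng(0)

# Two embeddings of the same batch; the second is a slightly noisy copy.
pred_1 = rng.normal(size=(batch_size, n_features))
pred_2 = pred_1 + 0.05 * rng.normal(size=(batch_size, n_features))

# Normalize each feature over the batch (zero mean, unit standard deviation).
normalize = lambda z: (z - z.mean(axis=0)) / z.std(axis=0)
c = (normalize(pred_1).T @ normalize(pred_2)) / batch_size

print(np.diag(c).round(2))  # diagonal entries are close to 1 (invariance)
print(np.abs(c - np.diag(np.diag(c))).max())  # off-diagonal entries stay small
```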
"""
"""
Taken from the original paper: [Barlow Twins: Self-Supervised Learning via Redundancy
Reduction](https://arxiv.org/abs/2103.03230)
"""
"""
### References
"""
"""
Paper:
[Barlow Twins: Self-Supervised Learning via Redundancy
Reduction](https://arxiv.org/abs/2103.03230)
Original Implementation:
[facebookresearch/barlowtwins](https://github.com/facebookresearch/barlowtwins)
"""
"""
## Setup
"""
"""shell
pip install tensorflow-addons
"""
import os
# Slightly faster training: roughly a 30 second decrease on the first epoch and a 1-2 second
# decrease in epoch time afterwards. Overall saves approx. 5 min of training time.
# Gives the GPU its own private threads, which allows more operations to be
# done faster
os.environ["TF_GPU_THREAD_MODE"] = "gpu_private"
import tensorflow as tf # framework
from tensorflow import keras # for tf.keras
import tensorflow_addons as tfa # LAMB optimizer and gaussian_blur_2d function
import numpy as np # np.random.random
import matplotlib.pyplot as plt # graphs
import datetime # tensorboard logs naming
# XLA optimization for faster performance(up to 10-15 minutes total time saved)
tf.config.optimizer.set_jit(True)
"""
## Load the CIFAR-10 dataset
"""
[
(train_features, train_labels),
(test_features, test_labels),
] = keras.datasets.cifar10.load_data()
train_features = train_features / 255.0
test_features = test_features / 255.0
"""
## Necessary Hyperparameters
"""
# Batch size of dataset
BATCH_SIZE = 512
# Width and height of image
IMAGE_SIZE = 32
"""
## Augmentation Utilities
The Barlow twins algorithm is heavily reliant on
augmentation. One unique feature of the method is that some augmentations are
applied only probabilistically.
**Augmentations**
* *RandomToGrayscale*: randomly applies grayscale to image 20% of the time
* *RandomColorJitter*: randomly applies color jitter 80% of the time
* *RandomFlip*: randomly flips image horizontally 50% of the time
* *RandomResizedCrop*: randomly crops an image to a random size then resizes. This
happens 100% of the time
* *RandomSolarize*: randomly applies solarization to an image 20% of the time
* *RandomBlur*: randomly blurs an image 20% of the time
"""
class Augmentation(keras.layers.Layer):
"""Base augmentation class.
Base augmentation class. Contains the random_execute method.
Methods:
random_execute: method that returns true or false based
on a probability. Used to determine whether an augmentation
will be run.
"""
def __init__(self):
super().__init__()
@tf.function
def random_execute(self, prob: float) -> bool:
"""random_execute function.
Arguments:
prob: a float value from 0-1 that determines the
probability.
Returns:
returns true or false based on the probability.
"""
return tf.random.uniform([], minval=0, maxval=1) < prob
class RandomToGrayscale(Augmentation):
"""RandomToGrayscale class.
RandomToGrayscale class. Randomly makes an image
grayscaled based on the random_execute method. There
is a 20% chance that an image will be grayscaled.
Methods:
call: method that grayscales an image 20% of
the time.
"""
@tf.function
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a grayscaled version of the image 20% of the time
and the original image 80% of the time.
"""
if self.random_execute(0.2):
x = tf.image.rgb_to_grayscale(x)
x = tf.tile(x, [1, 1, 3])
return x
class RandomColorJitter(Augmentation):
"""RandomColorJitter class.
RandomColorJitter class. Randomly adds color jitter to an image.
Color jitter means to add random brightness, contrast,
saturation, and hue to an image. There is a 80% chance that an
image will be randomly color-jittered.
Methods:
call: method that color-jitters an image 80% of
the time.
"""
@tf.function
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
Adds color jitter to image, including:
Brightness change by a max-delta of 0.8
Contrast change by a max-delta of 0.8
Saturation change by a max-delta of 0.8
Hue change by a max-delta of 0.2
Originally, the same deltas of the original paper
were used, but a performance boost of almost 2% was found
when doubling them.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a color-jittered version of the image 80% of the time
and the original image 20% of the time.
"""
if self.random_execute(0.8):
x = tf.image.random_brightness(x, 0.8)
x = tf.image.random_contrast(x, 0.4, 1.6)
x = tf.image.random_saturation(x, 0.4, 1.6)
x = tf.image.random_hue(x, 0.2)
return x
class RandomFlip(Augmentation):
"""RandomFlip class.
RandomFlip class. Randomly flips image horizontally. There is a 50%
chance that an image will be randomly flipped.
Methods:
call: method that flips an image 50% of
the time.
"""
@tf.function
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
Randomly flips the image.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a flipped version of the image 50% of the time
and the original image 50% of the time.
"""
if self.random_execute(0.5):
x = tf.image.random_flip_left_right(x)
return x
class RandomResizedCrop(Augmentation):
"""RandomResizedCrop class.
RandomResizedCrop class. Randomly crop an image to a random size,
then resize the image back to the original size.
Attributes:
image_size: The dimension of the image
Methods:
__call__: method that does random resize crop to the image.
"""
def __init__(self, image_size):
super().__init__()
self.image_size = image_size
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
Does random resize crop by randomly cropping an image to a random
size 75% - 100% the size of the image. Then resizes it.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a randomly cropped image.
"""
rand_size = tf.random.uniform(
shape=[],
minval=int(0.75 * self.image_size),
maxval=1 * self.image_size,
dtype=tf.int32,
)
crop = tf.image.random_crop(x, (rand_size, rand_size, 3))
crop_resize = tf.image.resize(crop, (self.image_size, self.image_size))
return crop_resize
class RandomSolarize(Augmentation):
"""RandomSolarize class.
RandomSolarize class. Randomly solarizes an image.
    Solarization inverts pixel values above a threshold.
Methods:
call: method that does random solarization 20% of the time.
"""
@tf.function
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
Randomly solarizes the image.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a solarized version of the image 20% of the time
and the original image 80% of the time.
"""
if self.random_execute(0.2):
# flips abnormally low pixels to abnormally high pixels
x = tf.where(x < 10, x, 255 - x)
return x
class RandomBlur(Augmentation):
"""RandomBlur class.
RandomBlur class. Randomly blurs an image.
Methods:
call: method that does random blur 20% of the time.
"""
@tf.function
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
        Randomly blurs the image.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a blurred version of the image 20% of the time
and the original image 80% of the time.
"""
if self.random_execute(0.2):
s = np.random.random()
return tfa.image.gaussian_filter2d(image=x, sigma=s)
return x
class RandomAugmentor(keras.Model):
"""RandomAugmentor class.
RandomAugmentor class. Chains all the augmentations into
one pipeline.
Attributes:
        image_size: An integer representing the width and height
of the image. Designed to be used for square images.
random_resized_crop: Instance variable representing the
RandomResizedCrop layer.
random_flip: Instance variable representing the
RandomFlip layer.
random_color_jitter: Instance variable representing the
RandomColorJitter layer.
random_blur: Instance variable representing the
RandomBlur layer
random_to_grayscale: Instance variable representing the
RandomToGrayscale layer
random_solarize: Instance variable representing the
RandomSolarize layer
Methods:
call: chains layers in pipeline together
"""
def __init__(self, image_size: int):
super().__init__()
self.image_size = image_size
self.random_resized_crop = RandomResizedCrop(image_size)
self.random_flip = RandomFlip()
self.random_color_jitter = RandomColorJitter()
self.random_blur = RandomBlur()
self.random_to_grayscale = RandomToGrayscale()
self.random_solarize = RandomSolarize()
def call(self, x: tf.Tensor) -> tf.Tensor:
x = self.random_resized_crop(x)
x = self.random_flip(x)
x = self.random_color_jitter(x)
x = self.random_blur(x)
x = self.random_to_grayscale(x)
x = self.random_solarize(x)
x = tf.clip_by_value(x, 0, 1)
return x
bt_augmentor = RandomAugmentor(IMAGE_SIZE)
"""
## Data Loading
A class that creates the barlow twins' dataset.
The dataset consists of two copies of each image, with each copy receiving different
augmentations.
"""
class BTDatasetCreator:
"""Barlow twins dataset creator class.
BTDatasetCreator class. Responsible for creating the
barlow twins' dataset.
Attributes:
options: tf.data.Options needed to configure a setting
that may improve performance.
seed: random seed for shuffling. Used to synchronize two
augmented versions.
augmentor: augmentor used for augmentation.
Methods:
__call__: creates barlow dataset.
augmented_version: creates 1 half of the dataset.
"""
def __init__(self, augmentor: RandomAugmentor, seed: int = 1024):
self.options = tf.data.Options()
self.options.threading.max_intra_op_parallelism = 1
self.seed = seed
self.augmentor = augmentor
def augmented_version(self, ds: list) -> tf.data.Dataset:
return (
tf.data.Dataset.from_tensor_slices(ds)
.shuffle(1000, seed=self.seed)
.map(self.augmentor, num_parallel_calls=tf.data.AUTOTUNE)
.batch(BATCH_SIZE, drop_remainder=True)
.prefetch(tf.data.AUTOTUNE)
.with_options(self.options)
)
def __call__(self, ds: list) -> tf.data.Dataset:
a1 = self.augmented_version(ds)
a2 = self.augmented_version(ds)
return tf.data.Dataset.zip((a1, a2)).with_options(self.options)
augment_versions = BTDatasetCreator(bt_augmentor)(train_features)
"""
View examples of dataset.
"""
sample_augment_versions = iter(augment_versions)
def plot_values(batch: tuple):
fig, axs = plt.subplots(3, 3)
fig1, axs1 = plt.subplots(3, 3)
fig.suptitle("Augmentation 1")
fig1.suptitle("Augmentation 2")
a1, a2 = batch
# plots images on both tables
for i in range(3):
for j in range(3):
# CHANGE(add / 255)
axs[i][j].imshow(a1[3 * i + j])
axs[i][j].axis("off")
axs1[i][j].imshow(a2[3 * i + j])
axs1[i][j].axis("off")
plt.show()
plot_values(next(sample_augment_versions))
"""
## Pseudocode of loss and model
The following sections follow the original author's pseudocode, covering both the model and
the loss function (see the diagram below). They also include a reference for the variables used.
"""
"""

"""
"""
Reference:
```
y_a: first augmented version of original image.
y_b: second augmented version of original image.
z_a: model representation(embeddings) of y_a.
z_b: model representation(embeddings) of y_b.
z_a_norm: normalized z_a.
z_b_norm: normalized z_b.
c: cross correlation matrix.
c_diff: diagonal portion of loss(invariance term).
off_diag: off-diagonal portion of loss(redundancy reduction term).
```
"""
"""
## BarlowLoss: barlow twins model's loss function
Barlow Twins uses the cross correlation matrix for its loss. There are two parts to the
loss function:
* ***The invariance term*** (diagonal). This part is used to push the diagonal entries of the
matrix towards 1. When this is the case, the matrix shows that the two augmented views are
correlated (i.e. the same).
    * The loss function subtracts 1 from each diagonal entry and squares the result.
* ***The redundancy reduction term*** (off-diagonal). Here, the Barlow Twins loss
aims to make these values zero. As mentioned before, the representation is redundant if
its neurons are correlated at positions off the diagonal.
    * Off-diagonal entries are squared and weighted by a lambda factor.
After this, the two parts are summed together.
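To make the relative weighting concrete, here is a tiny worked example (our own
illustration, on a hypothetical 2 x 2 cross-correlation matrix, using the same
lambda of 5e-3 as the implementation below):
```
import tensorflow as tf

c = tf.constant([[0.9, 0.2], [-0.1, 0.8]])
lambda_amt = 5e-3
invariance = tf.reduce_sum(tf.pow(tf.linalg.diag_part(c) - 1.0, 2))  # (-0.1)^2 + (-0.2)^2 = 0.05
redundancy = tf.reduce_sum(tf.pow(tf.linalg.set_diag(c, tf.zeros(2)), 2))  # 0.2^2 + (-0.1)^2 = 0.05
print(float(invariance + lambda_amt * redundancy))  # ~0.05025
```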
"""
class BarlowLoss(keras.losses.Loss):
"""BarlowLoss class.
BarlowLoss class. Creates a loss function based on the cross-correlation
matrix.
Attributes:
batch_size: the batch size of the dataset
lambda_amt: the value for lambda(used in cross_corr_matrix_loss)
Methods:
__init__: gets instance variables
call: gets the loss based on the cross-correlation matrix
make_diag_zeros: Used in calculating off-diagonal section
of loss function; makes diagonals zeros.
cross_corr_matrix_loss: creates loss based on cross correlation
matrix.
"""
def __init__(self, batch_size: int):
"""__init__ method.
Gets the instance variables
Arguments:
batch_size: An integer value representing the batch size of the
dataset. Used for cross correlation matrix calculation.
"""
super().__init__()
self.lambda_amt = 5e-3
self.batch_size = batch_size
def get_off_diag(self, c: tf.Tensor) -> tf.Tensor:
"""get_off_diag method.
Makes the diagonals of the cross correlation matrix zeros.
This is used in the off-diagonal portion of the loss function,
where we take the squares of the off-diagonal values and sum them.
Arguments:
c: A tf.tensor that represents the cross correlation
matrix
Returns:
Returns a tf.tensor which represents the cross correlation
matrix with its diagonals as zeros.
"""
zero_diag = tf.zeros(c.shape[-1])
return tf.linalg.set_diag(c, zero_diag)
def cross_corr_matrix_loss(self, c: tf.Tensor) -> tf.Tensor:
"""cross_corr_matrix_loss method.
Gets the loss based on the cross correlation matrix.
We want the diagonals to be 1's and everything else to be
zeros to show that the two augmented images are similar.
Loss function procedure:
take the diagonal of the cross-correlation matrix, subtract by 1,
and square that value so no negatives.
Take the off-diagonal of the cc-matrix(see get_off_diag()),
square those values to get rid of negatives and increase the value,
and multiply it by a lambda to weight it such that it is of equal
        value to the optimizer as the diagonal (there are more values off-diag
        than on-diag).
        Sum each part over its entries, then add the two sums together.
Arguments:
c: A tf.tensor that represents the cross correlation
matrix
Returns:
            Returns a scalar tf.Tensor representing the loss computed from the
            cross correlation matrix (invariance term plus weighted redundancy term).
"""
# subtracts diagonals by one and squares them(first part)
c_diff = tf.pow(tf.linalg.diag_part(c) - 1, 2)
# takes off diagonal, squares it, multiplies with lambda(second part)
off_diag = tf.pow(self.get_off_diag(c), 2) * self.lambda_amt
# sum first and second parts together
loss = tf.reduce_sum(c_diff) + tf.reduce_sum(off_diag)
return loss
def normalize(self, output: tf.Tensor) -> tf.Tensor:
"""normalize method.
Normalizes the model prediction.
Arguments:
output: the model prediction.
Returns:
Returns a normalized version of the model prediction.
"""
return (output - tf.reduce_mean(output, axis=0)) / tf.math.reduce_std(
output, axis=0
)
def cross_corr_matrix(self, z_a_norm: tf.Tensor, z_b_norm: tf.Tensor) -> tf.Tensor:
"""cross_corr_matrix method.
Creates a cross correlation matrix from the predictions.
It transposes the first prediction and multiplies this with
the second, creating a matrix with shape (n_dense_units, n_dense_units).
See build_twin() for more info. Then it divides this with the
batch size.
Arguments:
z_a_norm: A normalized version of the first prediction.
z_b_norm: A normalized version of the second prediction.
Returns:
Returns a cross correlation matrix.
"""
return (tf.transpose(z_a_norm) @ z_b_norm) / self.batch_size
def call(self, z_a: tf.Tensor, z_b: tf.Tensor) -> tf.Tensor:
"""call method.
        Computes the cross-correlation loss. Uses cross_corr_matrix()
        to build the cross correlation matrix, then computes the loss
        with cross_corr_matrix_loss() and returns it.
Arguments:
z_a: The prediction of the first set of augmented data.
z_b: the prediction of the second set of augmented data.
Returns:
Returns a (rank-0) tf.Tensor that represents the loss.
"""
z_a_norm, z_b_norm = self.normalize(z_a), self.normalize(z_b)
c = self.cross_corr_matrix(z_a_norm, z_b_norm)
loss = self.cross_corr_matrix_loss(c)
return loss
"""
## Barlow Twins' Model Architecture
The model has two parts:
* The encoder network, which is a resnet-34.
* The projector network, which creates the model embeddings.
* This consists of an MLP with 3 dense-batchnorm-relu layers.
"""
"""
Resnet encoder network implementation:
"""
class ResNet34:
"""Resnet34 class.
Responsible for the Resnet 34 architecture.
Modified from
    https://www.analyticsvidhya.com/blog/2021/08/how-to-code-your-resnet-from-scratch-in-tensorflow/#h2_2.
View their website for more information.
"""
def identity_block(self, x, filter):
# copy tensor to variable called x_skip
x_skip = x
# Layer 1
x = tf.keras.layers.Conv2D(filter, (3, 3), padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=3)(x)
x = tf.keras.layers.Activation("relu")(x)
# Layer 2
x = tf.keras.layers.Conv2D(filter, (3, 3), padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=3)(x)
# Add Residue
x = tf.keras.layers.Add()([x, x_skip])
x = tf.keras.layers.Activation("relu")(x)
return x
def convolutional_block(self, x, filter):
# copy tensor to variable called x_skip
x_skip = x
# Layer 1
x = tf.keras.layers.Conv2D(filter, (3, 3), padding="same", strides=(2, 2))(x)
x = tf.keras.layers.BatchNormalization(axis=3)(x)
x = tf.keras.layers.Activation("relu")(x)
# Layer 2
x = tf.keras.layers.Conv2D(filter, (3, 3), padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=3)(x)
# Processing Residue with conv(1,1)
x_skip = tf.keras.layers.Conv2D(filter, (1, 1), strides=(2, 2))(x_skip)
# Add Residue
x = tf.keras.layers.Add()([x, x_skip])
x = tf.keras.layers.Activation("relu")(x)
return x
def __call__(self, shape=(32, 32, 3)):
# Step 1 (Setup Input Layer)
x_input = tf.keras.layers.Input(shape)
x = tf.keras.layers.ZeroPadding2D((3, 3))(x_input)
# Step 2 (Initial Conv layer along with maxPool)
x = tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding="same")(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding="same")(x)
# Define size of sub-blocks and initial filter size
block_layers = [3, 4, 6, 3]
filter_size = 64
# Step 3 Add the Resnet Blocks
for i in range(4):
if i == 0:
# For sub-block 1 Residual/Convolutional block not needed
for j in range(block_layers[i]):
x = self.identity_block(x, filter_size)
else:
# One Residual/Convolutional Block followed by Identity blocks
# The filter size will go on increasing by a factor of 2
filter_size = filter_size * 2
x = self.convolutional_block(x, filter_size)
for j in range(block_layers[i] - 1):
x = self.identity_block(x, filter_size)
# Step 4 End Dense Network
x = tf.keras.layers.AveragePooling2D((2, 2), padding="same")(x)
x = tf.keras.layers.Flatten()(x)
model = tf.keras.models.Model(inputs=x_input, outputs=x, name="ResNet34")
return model
"""
Projector network:
"""
def build_twin() -> keras.Model:
"""build_twin method.
Builds a barlow twins model consisting of an encoder(resnet-34)
and a projector, which generates embeddings for the images
Returns:
returns a barlow twins model
"""
# number of dense neurons in the projector
n_dense_neurons = 5000
# encoder network
resnet = ResNet34()()
last_layer = resnet.layers[-1].output
# intermediate layers of the projector network
n_layers = 2
for i in range(n_layers):
dense = tf.keras.layers.Dense(n_dense_neurons, name=f"projector_dense_{i}")
if i == 0:
x = dense(last_layer)
else:
x = dense(x)
x = tf.keras.layers.BatchNormalization(name=f"projector_bn_{i}")(x)
x = tf.keras.layers.ReLU(name=f"projector_relu_{i}")(x)
x = tf.keras.layers.Dense(n_dense_neurons, name=f"projector_dense_{n_layers}")(x)
model = keras.Model(resnet.input, x)
return model
"""
## Training Loop Model
See pseudocode for reference.
"""
class BarlowModel(keras.Model):
"""BarlowModel class.
BarlowModel class. Responsible for making predictions and handling
gradient descent with the optimizer.
Attributes:
model: the barlow model architecture.
loss_tracker: the loss metric.
Methods:
train_step: one train step; do model predictions, loss, and
optimizer step.
metrics: Returns metrics.
"""
def __init__(self):
super().__init__()
self.model = build_twin()
self.loss_tracker = keras.metrics.Mean(name="loss")
@property
def metrics(self):
return [self.loss_tracker]
def train_step(self, batch: tf.Tensor) -> tf.Tensor:
"""train_step method.
Do one train step. Make model predictions, find loss, pass loss to
optimizer, and make optimizer apply gradients.
Arguments:
batch: one batch of data to be given to the loss function.
Returns:
Returns a dictionary with the loss metric.
"""
# get the two augmentations from the batch
y_a, y_b = batch
with tf.GradientTape() as tape:
# get two versions of predictions
z_a, z_b = self.model(y_a, training=True), self.model(y_b, training=True)
loss = self.loss(z_a, z_b)
grads_model = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(grads_model, self.model.trainable_variables))
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
"""
## Model Training
* Used the LAMB optimizer, instead of ADAM or SGD.
* Similar to the LARS optimizer used in the paper, and lets the model converge much
faster than other methods.
* Expected training time: 1 hour 30 min. Go and eat a snack or take a nap or something.
"""
# sets up model, optimizer, loss
bm = BarlowModel()
# chose the LAMB optimizer due to high batch sizes. Converged MUCH faster
# than ADAM or SGD
optimizer = tfa.optimizers.LAMB()
loss = BarlowLoss(BATCH_SIZE)
bm.compile(optimizer=optimizer, loss=loss)
# Expected training time: 1 hour 30 min
history = bm.fit(augment_versions, epochs=160)
plt.plot(history.history["loss"])
plt.show()
"""
## Evaluation
**Linear evaluation:** to evaluate the model's performance, we add
a linear dense layer at the end and freeze the main model's weights, only letting the
dense layer be tuned. If the model actually learned something, then the accuracy would
be significantly higher than random chance.
**Accuracy on CIFAR-10** : 64% for this notebook. This is much better than the 10% we get
from random guessing.
"""
# Approx: 64% accuracy with this barlow twins model.
xy_ds = (
tf.data.Dataset.from_tensor_slices((train_features, train_labels))
.shuffle(1000)
.batch(BATCH_SIZE, drop_remainder=True)
.prefetch(tf.data.AUTOTUNE)
)
test_ds = (
tf.data.Dataset.from_tensor_slices((test_features, test_labels))
.shuffle(1000)
.batch(BATCH_SIZE, drop_remainder=True)
.prefetch(tf.data.AUTOTUNE)
)
model = keras.models.Sequential(
[
bm.model,
keras.layers.Dense(
10, activation="softmax", kernel_regularizer=keras.regularizers.l2(0.02)
),
]
)
model.layers[0].trainable = False
linear_optimizer = tfa.optimizers.LAMB()
model.compile(
optimizer=linear_optimizer,
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
model.fit(xy_ds, epochs=35, validation_data=test_ds)
"""
## Conclusion
* Barlow Twins is a simple and concise method for contrastive and self-supervised
learning.
* With this resnet-34 model architecture, we were able to reach 62-64% validation
accuracy.
## Use-Cases of Barlow-Twins(and contrastive learning in General)
* Semi-supervised learning: You can see that this model reached 62-64% accuracy
even though the encoder was never trained with the labels. It can be used when you have
little labeled data but a lot of unlabeled data.
* You do barlow twins training on the unlabeled data, and then you do secondary training
with the labeled data.
## Helpful links
* [Paper](https://arxiv.org/abs/2103.03230)
* [Original Pytorch Implementation](https://github.com/facebookresearch/barlowtwins)
* [Sayak Paul's Implementation](https://colab.research.google.com/github/sayakpaul/Barlow-Twins-TF/blob/main/Barlow_Twins.ipynb#scrollTo=GlWepkM8_prl).
* Thanks to Sayak Paul for his implementation. It helped me with debugging and with
comparing accuracy and loss.
* [resnet34 implementation](https://www.analyticsvidhya.com/blog/2021/08/how-to-code-your-resnet-from-scratch-in-tensorflow/#h2_2)
* Thanks to Yashowardhan Shinde for writing the article.
"""
| keras-io/examples/vision/barlow_twins.py/0 | {
"file_path": "keras-io/examples/vision/barlow_twins.py",
"repo_id": "keras-io",
"token_count": 12437
} | 107 |
"""
Title: Focal Modulation: A replacement for Self-Attention
Author: [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Ritwik Raha](https://twitter.com/ritwik_raha)
Date created: 2023/01/25
Last modified: 2023/02/15
Description: Image classification with Focal Modulation Networks.
Accelerator: GPU
"""
"""
## Introduction
This tutorial aims to provide a comprehensive guide to the implementation of
Focal Modulation Networks, as presented in
[Yang et al.](https://arxiv.org/abs/2203.11926).
This tutorial will provide a formal, minimalistic approach to implementing Focal
Modulation Networks and explore their potential applications in the field of Deep Learning.
**Problem statement**
The Transformer architecture ([Vaswani et al.](https://arxiv.org/abs/1706.03762)),
which has become the de facto standard in most Natural Language Processing tasks, has
also been applied to the field of computer vision, e.g. Vision
Transformers ([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929v2)).
> In Transformers, the self-attention (SA) is arguably the key to its success which
enables input-dependent global interactions, in contrast to convolution operation which
constraints interactions in a local region with a shared kernel.
The **Attention** module is mathematically written as shown in **Equation 1**.
|  |
| :--: |
| Equation 1: The mathematical equation of attention (Source: Aritra and Ritwik) |
Where:
- `Q` is the query
- `K` is the key
- `V` is the value
- `d_k` is the dimension of the key
With **self-attention**, the query, key, and value are all sourced from the input
sequence. Let us rewrite the attention equation for self-attention as shown in **Equation
2**.
|  |
| :--: |
| Equation 2: The mathematical equation of self-attention (Source: Aritra and Ritwik) |
Upon looking at the equation of self-attention, we see that its cost is quadratic in the
number of tokens: as the number of tokens increases, so do the computation time and memory
cost. To mitigate this problem and make Transformers more interpretable, Yang et al.
have tried to replace the Self-Attention module with better components.
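Before looking at the solution, here is a minimal sketch of single-head scaled
dot-product self-attention (our own illustration of Equation 2, not code from the
paper). Note the `(N, N)` attention matrix, which is what makes the cost quadratic
in the number of tokens `N`:
```
import tensorflow as tf

def self_attention(x, w_q, w_k, w_v):
    # x: (N, C) tokens; w_q, w_k, w_v: (C, d_k) projection matrices.
    q, k, v = x @ w_q, x @ w_k, x @ w_v
    d_k = tf.cast(tf.shape(k)[-1], x.dtype)
    scores = q @ tf.transpose(k) / tf.sqrt(d_k)  # shape (N, N): quadratic in N
    return tf.nn.softmax(scores, axis=-1) @ v    # shape (N, d_k)

tokens = tf.random.normal((64, 32))  # 64 tokens of dimension 32
w_q, w_k, w_v = (tf.random.normal((32, 32)) for _ in range(3))
print(self_attention(tokens, w_q, w_k, w_v).shape)  # (64, 32)
```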
**The Solution**
Yang et al. introduce the Focal Modulation layer to serve as a
seamless replacement for the Self-Attention Layer. The layer boasts high
interpretability, making it a valuable tool for Deep Learning practitioners.
In this tutorial, we will delve into the practical application of this layer by training
the entire model on the CIFAR-10 dataset and visually interpreting the layer's
performance.
Note: We try to align our implementation with the
[official implementation](https://github.com/microsoft/FocalNet).
"""
"""
## Setup and Imports
We use tensorflow version `2.11.0` for this tutorial.
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.optimizers.experimental import AdamW
from typing import Optional, Tuple, List
from matplotlib import pyplot as plt
from random import randint
# Set seed for reproducibility.
tf.keras.utils.set_random_seed(42)
"""
## Global Configuration
We do not have any strong rationale behind choosing these hyperparameters. Please feel
free to change the configuration and train the model.
"""
# DATA
TRAIN_SLICE = 40000
BUFFER_SIZE = 2048
BATCH_SIZE = 1024
AUTO = tf.data.AUTOTUNE
INPUT_SHAPE = (32, 32, 3)
IMAGE_SIZE = 48
NUM_CLASSES = 10
# OPTIMIZER
LEARNING_RATE = 1e-4
WEIGHT_DECAY = 1e-4
# TRAINING
EPOCHS = 25
"""
## Load and process the CIFAR-10 dataset
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
(x_train, y_train), (x_val, y_val) = (
(x_train[:TRAIN_SLICE], y_train[:TRAIN_SLICE]),
(x_train[TRAIN_SLICE:], y_train[TRAIN_SLICE:]),
)
"""
### Build the augmentations
We use the `keras.Sequential` API to compose all the individual augmentation steps
into one API.
"""
# Build the `train` augmentation pipeline.
train_aug = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
layers.RandomFlip("horizontal"),
],
name="train_data_augmentation",
)
# Build the `val` and `test` data pipeline.
test_aug = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
],
name="test_data_augmentation",
)
"""
### Build `tf.data` pipeline
"""
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = (
train_ds.map(
lambda image, label: (train_aug(image), label), num_parallel_calls=AUTO
)
.shuffle(BUFFER_SIZE)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_ds = (
val_ds.map(lambda image, label: (test_aug(image), label), num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_ds = (
test_ds.map(lambda image, label: (test_aug(image), label), num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
"""
## Architecture
We pause here to take a quick look at the Architecture of the Focal Modulation Network.
**Figure 1** shows how every individual layer is compiled into a single model. This gives
us a bird's eye view of the entire architecture.
|  |
| :--: |
| Figure 1: A diagram of the Focal Modulation model (Source: Aritra and Ritwik) |
We dive deep into each of these layers in the following sections. This is the order we
will follow:
- Patch Embedding Layer
- Focal Modulation Block
- Multi-Layer Perceptron
- Focal Modulation Layer
- Hierarchical Contextualization
- Gated Aggregation
- Building Focal Modulation Block
- Building the Basic Layer
To better understand the architecture in a format we are well versed in, let us see how
the Focal Modulation Network would look when drawn like a Transformer architecture.
**Figure 2** shows the encoder layer of a traditional Transformer architecture where Self
Attention is replaced with the Focal Modulation layer.
The <font color="blue">blue</font> blocks represent the Focal Modulation block. A stack
of these blocks builds a single Basic Layer. The <font color="green">green</font> blocks
represent the Focal Modulation layer.
|  |
| :--: |
| Figure 2: The Entire Architecture (Source: Aritra and Ritwik) |
"""
"""
## Patch Embedding Layer
The patch embedding layer is used to patchify the input images and project them into a
latent space. This layer is also used as the down-sampling layer in the architecture.
"""
class PatchEmbed(layers.Layer):
"""Image patch embedding layer, also acts as the down-sampling layer.
Args:
image_size (Tuple[int]): Input image resolution.
patch_size (Tuple[int]): Patch spatial resolution.
embed_dim (int): Embedding dimension.
"""
def __init__(
self,
image_size: Tuple[int] = (224, 224),
patch_size: Tuple[int] = (4, 4),
embed_dim: int = 96,
**kwargs,
):
super().__init__(**kwargs)
patch_resolution = [
image_size[0] // patch_size[0],
image_size[1] // patch_size[1],
]
self.image_size = image_size
self.patch_size = patch_size
self.embed_dim = embed_dim
self.patch_resolution = patch_resolution
self.num_patches = patch_resolution[0] * patch_resolution[1]
self.proj = layers.Conv2D(
filters=embed_dim, kernel_size=patch_size, strides=patch_size
)
self.flatten = layers.Reshape(target_shape=(-1, embed_dim))
self.norm = keras.layers.LayerNormalization(epsilon=1e-7)
def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, int, int, int]:
"""Patchifies the image and converts into tokens.
Args:
x: Tensor of shape (B, H, W, C)
Returns:
A tuple of the processed tensor, height of the projected
feature map, width of the projected feature map, number
of channels of the projected feature map.
"""
# Project the inputs.
x = self.proj(x)
# Obtain the shape from the projected tensor.
height = tf.shape(x)[1]
width = tf.shape(x)[2]
channels = tf.shape(x)[3]
# B, H, W, C -> B, H*W, C
x = self.norm(self.flatten(x))
return x, height, width, channels
"""
## Focal Modulation block
A Focal Modulation block can be considered as a single Transformer Block with the Self
Attention (SA) module replaced by the Focal Modulation module, as we saw in **Figure
2**.
Let us recall what a focal modulation block is supposed to look like with the aid of
**Figure 3**.
|  |
| :--: |
| Figure 3: The isolated view of the Focal Modulation Block (Source: Aritra and Ritwik) |
The Focal Modulation Block consists of:
- Multilayer Perceptron
- Focal Modulation layer
"""
"""
### Multilayer Perceptron
"""
def MLP(
in_features: int,
hidden_features: Optional[int] = None,
out_features: Optional[int] = None,
mlp_drop_rate: float = 0.0,
):
hidden_features = hidden_features or in_features
out_features = out_features or in_features
return keras.Sequential(
[
layers.Dense(units=hidden_features, activation=keras.activations.gelu),
layers.Dense(units=out_features),
layers.Dropout(rate=mlp_drop_rate),
]
)
"""
### Focal Modulation layer
In a typical Transformer architecture, for each visual token (**query**) `x_i in R^C` in
an input feature map `X in R^{HxWxC}` a **generic encoding process** produces a feature
representation `y_i in R^C`.
The encoding process consists of **interaction** (with its surroundings, e.g. a dot
product), and **aggregation** (over the contexts, e.g. a weighted mean).
We will talk about two types of encoding here:
- Interaction and then Aggregation in **Self-Attention**
- Aggregation and then Interaction in **Focal Modulation**
**Self-Attention**
|  |
| :--: |
| **Figure 4**: Self-Attention module. (Source: Aritra and Ritwik) |
|  |
| :--: |
| **Equation 3:** Aggregation and Interaction in Self-Attention (Source: Aritra and Ritwik) |
As shown in **Figure 4** the query and the key interact (in the interaction step) with
each other to output the attention scores. The weighted aggregation of the value comes
next, known as the aggregation step.
**Focal Modulation**
|  |
| :--: |
| **Figure 5**: Focal Modulation module. (Source: Aritra and Ritwik) |
|  |
| :--: |
| **Equation 4:** Aggregation and Interaction in Focal Modulation (Source: Aritra and Ritwik) |
**Figure 5** depicts the Focal Modulation layer. `q()` is the query projection
function. It is a **linear layer** that projects the query into a latent space. `m()` is
the context aggregation function. Unlike in self-attention, the aggregation step in
focal modulation takes place before the interaction step.
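In informal pseudocode (our own summary of Equations 3 and 4, not taken verbatim
from the paper), the two encodings differ mainly in the order of the two steps:
```
# Self-attention: interaction first (query-key dot products), then aggregation.
y_i = sum_over_j(softmax(q(x_i) . k(x_j)) * v(x_j))

# Focal modulation: aggregation first (hierarchical contexts), then interaction.
y_i = q(x_i) * m(i, X)   # m() aggregates the contexts around position i
```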
"""
"""
While `q()` is pretty straightforward to understand, the context aggregation function
`m()` is more complex. Therefore, this section will focus on `m()`.
| |
| :--: |
| **Figure 6**: Context Aggregation function `m()`. (Source: Aritra and Ritwik) |
The context aggregation function `m()` consists of two parts as shown in **Figure 6**:
- Hierarchical Contextualization
- Gated Aggregation
"""
"""
#### Hierarchical Contextualization
| |
| :--: |
| **Figure 7**: Hierarchical Contextualization (Source: Aritra and Ritwik) |
In **Figure 7**, we see that the input is first projected linearly. This linear projection
produces `Z^0`, which can be expressed as follows:
|  |
| :--: |
| Equation 5: Linear projection of `Z^0` (Source: Aritra and Ritwik) |
`Z^0` is then passed on to a series of Depth-Wise (DWConv) Conv and
[GeLU](https://www.tensorflow.org/api_docs/python/tf/keras/activations/gelu) layers. The
authors term each block of DWConv and GeLU a level, denoted by `l`. In **Figure 6** we
have two levels. Mathematically this is represented as:
|  |
| :--: |
| Equation 6: Levels of the modulation layer (Source: Aritra and Ritwik) |
where `l in {1, ... , L}`
The final feature map goes through a Global Average Pooling Layer. This can be expressed
as follows:
|  |
| :--: |
| Equation 7: Average Pooling of the final feature (Source: Aritra and Ritwik)|
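Putting Equations 5 to 7 together, here is a minimal toy sketch (our own illustration
with a fixed 3 x 3 kernel; the actual `FocalModulationLayer` below grows the kernel size
with each level and adds the gating and modulation steps):
```
import tensorflow as tf
from tensorflow.keras import layers

focal_level = 2  # L, the number of DWConv + GeLU levels
x = tf.random.normal((1, 12, 12, 96))  # a (B, H, W, C) feature map

z = layers.Dense(96)(x)  # Equation 5: Z^0 is a linear projection of the input
contexts = [z]
for level in range(1, focal_level + 1):
    # Equation 6: Z^l = GeLU(DWConv(Z^{l-1})); groups=channels makes the conv depth-wise
    z = layers.Conv2D(96, kernel_size=3, padding="same", groups=96,
                      activation="gelu", use_bias=False)(z)
    contexts.append(z)
z_global = tf.reduce_mean(z, axis=(1, 2), keepdims=True)  # Equation 7: global average pool
print(len(contexts), z_global.shape)  # 3 (1, 1, 1, 96)
```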
"""
"""
#### Gated Aggregation
| |
| :--: |
| **Figure 8**: Gated Aggregation (Source: Aritra and Ritwik) |
Now that we have `L+1` intermediate feature maps by virtue of the Hierarchical
Contextualization step, we need a gating mechanism that lets some features pass and
prohibits others. This can be implemented with the attention module.
Later in the tutorial, we will visualize these gates to better understand their
usefulness.
First, we build the weights for aggregation. Here we apply a **linear layer** on the input
feature map that projects it into `L+1` dimensions.
|  |
| :--: |
| Equation 8: Gates (Source: Aritra and Ritwik) |
Next we perform the weighted aggregation over the contexts.
|  |
| :--: |
| Equation 9: Final feature map (Source: Aritra and Ritwik) |
To enable communication across different channels, we use another linear layer `h()`
to obtain the modulator
|  |
| :--: |
| Equation 10: Modulator (Source: Aritra and Ritwik) |
To sum up the Focal Modulation layer we have:
|  |
| :--: |
| Equation 11: Focal Modulation Layer (Source: Aritra and Ritwik) |
"""
class FocalModulationLayer(layers.Layer):
"""The Focal Modulation layer includes query projection & context aggregation.
Args:
dim (int): Projection dimension.
focal_window (int): Window size for focal modulation.
focal_level (int): The current focal level.
focal_factor (int): Factor of focal modulation.
proj_drop_rate (float): Rate of dropout.
"""
def __init__(
self,
dim: int,
focal_window: int,
focal_level: int,
focal_factor: int = 2,
proj_drop_rate: float = 0.0,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim
self.focal_window = focal_window
self.focal_level = focal_level
self.focal_factor = focal_factor
self.proj_drop_rate = proj_drop_rate
# Project the input feature into a new feature space using a
# linear layer. Note the `units` used. We will be projecting the input
# feature all at once and split the projection into query, context,
# and gates.
self.initial_proj = layers.Dense(
units=(2 * self.dim) + (self.focal_level + 1),
use_bias=True,
)
self.focal_layers = list()
self.kernel_sizes = list()
for idx in range(self.focal_level):
kernel_size = (self.focal_factor * idx) + self.focal_window
depth_gelu_block = keras.Sequential(
[
layers.ZeroPadding2D(padding=(kernel_size // 2, kernel_size // 2)),
layers.Conv2D(
filters=self.dim,
kernel_size=kernel_size,
activation=keras.activations.gelu,
groups=self.dim,
use_bias=False,
),
]
)
self.focal_layers.append(depth_gelu_block)
self.kernel_sizes.append(kernel_size)
self.activation = keras.activations.gelu
self.gap = layers.GlobalAveragePooling2D(keepdims=True)
self.modulator_proj = layers.Conv2D(
filters=self.dim,
kernel_size=(1, 1),
use_bias=True,
)
self.proj = layers.Dense(units=self.dim)
self.proj_drop = layers.Dropout(self.proj_drop_rate)
def call(self, x: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor:
"""Forward pass of the layer.
Args:
x: Tensor of shape (B, H, W, C)
"""
        # Apply the linear projection to the input feature map
x_proj = self.initial_proj(x)
# Split the projected x into query, context and gates
query, context, self.gates = tf.split(
value=x_proj,
num_or_size_splits=[self.dim, self.dim, self.focal_level + 1],
axis=-1,
)
# Context aggregation
context = self.focal_layers[0](context)
context_all = context * self.gates[..., 0:1]
for idx in range(1, self.focal_level):
context = self.focal_layers[idx](context)
context_all += context * self.gates[..., idx : idx + 1]
# Build the global context
context_global = self.activation(self.gap(context))
context_all += context_global * self.gates[..., self.focal_level :]
# Focal Modulation
self.modulator = self.modulator_proj(context_all)
x_output = query * self.modulator
# Project the output and apply dropout
x_output = self.proj(x_output)
x_output = self.proj_drop(x_output)
return x_output
"""
### The Focal Modulation block
Finally, we have all the components we need to build the Focal Modulation block. Here we
take the MLP and Focal Modulation layer together and build the Focal Modulation block.
"""
class FocalModulationBlock(layers.Layer):
"""Combine FFN and Focal Modulation Layer.
Args:
dim (int): Number of input channels.
        input_resolution (Tuple[int]): Input resolution.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float): Dropout rate.
drop_path (float): Stochastic depth rate.
focal_level (int): Number of focal levels.
focal_window (int): Focal window size at first focal level
"""
def __init__(
self,
dim: int,
input_resolution: Tuple[int],
mlp_ratio: float = 4.0,
drop: float = 0.0,
drop_path: float = 0.0,
focal_level: int = 1,
focal_window: int = 3,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim
self.input_resolution = input_resolution
self.mlp_ratio = mlp_ratio
self.focal_level = focal_level
self.focal_window = focal_window
self.norm = layers.LayerNormalization(epsilon=1e-5)
self.modulation = FocalModulationLayer(
dim=self.dim,
focal_window=self.focal_window,
focal_level=self.focal_level,
proj_drop_rate=drop,
)
mlp_hidden_dim = int(self.dim * self.mlp_ratio)
self.mlp = MLP(
in_features=self.dim,
hidden_features=mlp_hidden_dim,
mlp_drop_rate=drop,
)
def call(self, x: tf.Tensor, height: int, width: int, channels: int) -> tf.Tensor:
"""Processes the input tensor through the focal modulation block.
Args:
x (tf.Tensor): Inputs of the shape (B, L, C)
height (int): The height of the feature map
width (int): The width of the feature map
channels (int): The number of channels of the feature map
Returns:
The processed tensor.
"""
shortcut = x
# Focal Modulation
x = tf.reshape(x, shape=(-1, height, width, channels))
x = self.modulation(x)
x = tf.reshape(x, shape=(-1, height * width, channels))
# FFN
x = shortcut + x
x = x + self.mlp(self.norm(x))
return x
"""
## The Basic Layer
The basic layer consists of a collection of Focal Modulation blocks. This is
illustrated in **Figure 9**.
|  |
| :--: |
| **Figure 9**: Basic Layer, a collection of focal modulation blocks. (Source: Aritra and Ritwik) |
Notice how in **Figure 9** there is more than one focal modulation block, denoted by `Nx`.
This shows that the Basic Layer is a collection of Focal Modulation blocks.
"""
class BasicLayer(layers.Layer):
"""Collection of Focal Modulation Blocks.
Args:
dim (int): Dimensions of the model.
out_dim (int): Dimension used by the Patch Embedding Layer.
input_resolution (Tuple[int]): Input image resolution.
depth (int): The number of Focal Modulation Blocks.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float): Dropout rate.
downsample (tf.keras.layers.Layer): Downsampling layer at the end of the layer.
focal_level (int): The current focal level.
focal_window (int): Focal window used.
"""
def __init__(
self,
dim: int,
out_dim: int,
input_resolution: Tuple[int],
depth: int,
mlp_ratio: float = 4.0,
drop: float = 0.0,
downsample=None,
focal_level: int = 1,
focal_window: int = 1,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.blocks = [
FocalModulationBlock(
dim=dim,
input_resolution=input_resolution,
mlp_ratio=mlp_ratio,
drop=drop,
focal_level=focal_level,
focal_window=focal_window,
)
for i in range(self.depth)
]
# Downsample layer at the end of the layer
if downsample is not None:
self.downsample = downsample(
image_size=input_resolution,
patch_size=(2, 2),
embed_dim=out_dim,
)
else:
self.downsample = None
def call(
self, x: tf.Tensor, height: int, width: int, channels: int
) -> Tuple[tf.Tensor, int, int, int]:
"""Forward pass of the layer.
Args:
x (tf.Tensor): Tensor of shape (B, L, C)
height (int): Height of feature map
width (int): Width of feature map
channels (int): Embed Dim of feature map
Returns:
A tuple of the processed tensor, changed height, width, and
dim of the tensor.
"""
# Apply Focal Modulation Blocks
for block in self.blocks:
x = block(x, height, width, channels)
        # Except for the last Basic Layer, every layer has a
        # downsample layer at the end.
if self.downsample is not None:
x = tf.reshape(x, shape=(-1, height, width, channels))
x, height_o, width_o, channels_o = self.downsample(x)
else:
height_o, width_o, channels_o = height, width, channels
return x, height_o, width_o, channels_o
"""
## The Focal Modulation Network model
This is the model that ties everything together.
It consists of a collection of Basic Layers with a classification head.
For a recap of how this is structured refer to **Figure 1**.
"""
class FocalModulationNetwork(keras.Model):
"""The Focal Modulation Network.
Parameters:
image_size (Tuple[int]): Spatial size of images used.
patch_size (Tuple[int]): Patch size of each patch.
num_classes (int): Number of classes used for classification.
embed_dim (int): Patch embedding dimension.
        depths (List[int]): Number of Focal Modulation Blocks in each Basic Layer.
mlp_ratio (float): Ratio of expansion for the intermediate layer of MLP.
drop_rate (float): The dropout rate for FM and MLP layers.
focal_levels (list): How many focal levels at all stages.
Note that this excludes the finest-grain level.
focal_windows (list): The focal window size at all stages.
"""
def __init__(
self,
image_size: Tuple[int] = (48, 48),
patch_size: Tuple[int] = (4, 4),
num_classes: int = 10,
embed_dim: int = 256,
depths: List[int] = [2, 3, 2],
mlp_ratio: float = 4.0,
drop_rate: float = 0.1,
focal_levels=[2, 2, 2],
focal_windows=[3, 3, 3],
**kwargs,
):
super().__init__(**kwargs)
self.num_layers = len(depths)
embed_dim = [embed_dim * (2**i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
self.patch_embed = PatchEmbed(
image_size=image_size,
patch_size=patch_size,
embed_dim=embed_dim[0],
)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patch_resolution
self.patches_resolution = patches_resolution
self.pos_drop = layers.Dropout(drop_rate)
self.basic_layers = list()
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=embed_dim[i_layer],
out_dim=(
embed_dim[i_layer + 1] if (i_layer < self.num_layers - 1) else None
),
input_resolution=(
patches_resolution[0] // (2**i_layer),
patches_resolution[1] // (2**i_layer),
),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
)
self.basic_layers.append(layer)
self.norm = keras.layers.LayerNormalization(epsilon=1e-7)
self.avgpool = layers.GlobalAveragePooling1D()
self.flatten = layers.Flatten()
self.head = layers.Dense(self.num_classes, activation="softmax")
def call(self, x: tf.Tensor) -> tf.Tensor:
"""Forward pass of the layer.
Args:
x: Tensor of shape (B, H, W, C)
Returns:
            The output class probabilities.
"""
# Patch Embed the input images.
x, height, width, channels = self.patch_embed(x)
x = self.pos_drop(x)
for idx, layer in enumerate(self.basic_layers):
x, height, width, channels = layer(x, height, width, channels)
x = self.norm(x)
x = self.avgpool(x)
x = self.flatten(x)
x = self.head(x)
return x
"""
## Train the model
Now with all the components in place and the architecture actually built, we are ready to
put it to good use.
In this section, we train our Focal Modulation model on the CIFAR-10 dataset.
"""
"""
### Visualization Callback
A key feature of the Focal Modulation Network is explicit input-dependency. This means
the modulator is calculated by looking at the local features around the target location,
so it depends on the input. In very simple terms, this makes interpretation easy. We can
simply lay the gating values and the original image next to each other to see how
the gating mechanism works.
The authors of the paper visualize the gates and the modulator in order to focus on the
interpretability of the Focal Modulation layer. Below is a visualization
callback that shows the gates and modulator of a specific layer in the model while the
model trains.
We will notice later that as the model trains, the visualizations get better.
The gates appear to selectively permit certain aspects of the input image to pass
through, while gently disregarding others, ultimately leading to improved classification
accuracy.
"""
def display_grid(
test_images: tf.Tensor,
gates: tf.Tensor,
modulator: tf.Tensor,
):
"""Displays the image with the gates and modulator overlayed.
Args:
test_images (tf.Tensor): A batch of test images.
        gates (tf.Tensor): The gates of the Focal Modulation Layer.
modulator (tf.Tensor): The modulator of the Focal Modulation Layer.
"""
fig, ax = plt.subplots(nrows=1, ncols=5, figsize=(25, 5))
    # Randomly sample an image from the batch.
index = randint(0, BATCH_SIZE - 1)
orig_image = test_images[index]
gate_image = gates[index]
modulator_image = modulator[index]
# Original Image
ax[0].imshow(orig_image)
ax[0].set_title("Original:")
ax[0].axis("off")
for index in range(1, 5):
img = ax[index].imshow(orig_image)
if index != 4:
overlay_image = gate_image[..., index - 1]
title = f"G {index}:"
else:
overlay_image = tf.norm(modulator_image, ord=2, axis=-1)
title = f"MOD:"
ax[index].imshow(
overlay_image, cmap="inferno", alpha=0.6, extent=img.get_extent()
)
ax[index].set_title(title)
ax[index].axis("off")
plt.axis("off")
plt.show()
plt.close()
"""
### TrainMonitor
"""
# Taking a batch of test inputs to measure the model's progress.
test_images, test_labels = next(iter(test_ds))
upsampler = tf.keras.layers.UpSampling2D(
size=(4, 4),
interpolation="bilinear",
)
class TrainMonitor(keras.callbacks.Callback):
def __init__(self, epoch_interval=None):
self.epoch_interval = epoch_interval
def on_epoch_end(self, epoch, logs=None):
if self.epoch_interval and epoch % self.epoch_interval == 0:
_ = self.model(test_images)
# Take the mid layer for visualization
gates = self.model.basic_layers[1].blocks[-1].modulation.gates
gates = upsampler(gates)
modulator = self.model.basic_layers[1].blocks[-1].modulation.modulator
modulator = upsampler(modulator)
# Display the grid of gates and modulator.
display_grid(test_images=test_images, gates=gates, modulator=modulator)
"""
### Learning Rate scheduler
"""
# Some code is taken from:
# https://www.kaggle.com/ashusma/training-rfcx-tensorflow-tpu-effnet-b2.
class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps
):
super().__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.pi = tf.constant(np.pi)
def __call__(self, step):
if self.total_steps < self.warmup_steps:
raise ValueError("Total_steps must be larger or equal to warmup_steps.")
cos_annealed_lr = tf.cos(
self.pi
* (tf.cast(step, tf.float32) - self.warmup_steps)
/ float(self.total_steps - self.warmup_steps)
)
learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr)
if self.warmup_steps > 0:
if self.learning_rate_base < self.warmup_learning_rate:
raise ValueError(
"Learning_rate_base must be larger or equal to "
"warmup_learning_rate."
)
slope = (
self.learning_rate_base - self.warmup_learning_rate
) / self.warmup_steps
warmup_rate = slope * tf.cast(step, tf.float32) + self.warmup_learning_rate
learning_rate = tf.where(
step < self.warmup_steps, warmup_rate, learning_rate
)
return tf.where(
step > self.total_steps, 0.0, learning_rate, name="learning_rate"
)
total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS)
warmup_epoch_percentage = 0.15
warmup_steps = int(total_steps * warmup_epoch_percentage)
scheduled_lrs = WarmUpCosine(
learning_rate_base=LEARNING_RATE,
total_steps=total_steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
)
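"""
Before initializing the model, we can optionally plot the schedule to sanity-check the
warmup and cosine decay phases. The snippet below is purely illustrative and does not
affect training.
"""
sample_steps = list(range(0, total_steps, 100))
sample_lrs = [float(scheduled_lrs(step)) for step in sample_steps]
plt.plot(sample_steps, sample_lrs)
plt.xlabel("Train step")
plt.ylabel("Learning rate")
plt.show()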
"""
### Initialize, compile and train the model
"""
focal_mod_net = FocalModulationNetwork()
optimizer = AdamW(learning_rate=scheduled_lrs, weight_decay=WEIGHT_DECAY)
# Compile and train the model.
focal_mod_net.compile(
optimizer=optimizer,
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
history = focal_mod_net.fit(
train_ds,
epochs=EPOCHS,
validation_data=val_ds,
callbacks=[TrainMonitor(epoch_interval=10)],
)
"""
## Plot loss and accuracy
"""
plt.plot(history.history["loss"], label="loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.legend()
plt.show()
plt.plot(history.history["accuracy"], label="accuracy")
plt.plot(history.history["val_accuracy"], label="val_accuracy")
plt.legend()
plt.show()
"""
## Test visualizations
Let's test our model on some test images and see what the gates look like.
"""
test_images, test_labels = next(iter(test_ds))
_ = focal_mod_net(test_images)
# Take the mid layer for visualization
gates = focal_mod_net.basic_layers[1].blocks[-1].modulation.gates
gates = upsampler(gates)
modulator = focal_mod_net.basic_layers[1].blocks[-1].modulation.modulator
modulator = upsampler(modulator)
# Plot the test images with the gates and modulator overlayed.
for row in range(5):
display_grid(
test_images=test_images,
gates=gates,
modulator=modulator,
)
"""
## Conclusion
The proposed Focal Modulation Network architecture is a mechanism that allows different
parts of an image to interact with each other in a way that depends on the image itself.
It works by first gathering different levels of context information around each part of
the image (the "query token"), then using a gate to decide which context information is
most relevant, and finally combining the chosen information in a simple but effective
way.
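Written out (paraphrasing the paper's formulation), the output at a query position `i`
is roughly `y_i = q(x_i) * h(sum_l g_i^l * ctx_i^l)`, where `q` is the query projection,
`ctx_i^l` is the context aggregated at focal level `l`, `g_i^l` are the input-dependent
gates, and `h` is the modulator projection.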
This is meant as a replacement for the Self-Attention mechanism of the Transformer
architecture. The key feature that makes this research notable is not the conception of
attention-free networks, but rather the introduction of an equally powerful architecture
that is interpretable.
The authors also mention that they created a series of Focal Modulation Networks
(FocalNets) that significantly outperform their Self-Attention counterparts with a
fraction of the parameters and pretraining data.
The FocalNets architecture has the potential to deliver impressive results and offers a
simple implementation. Its promising performance and ease of use make it an attractive
alternative to Self-Attention for researchers to explore in their own projects. It could
potentially become widely adopted by the Deep Learning community in the near future.
## Acknowledgement
We would like to thank [PyImageSearch](https://pyimagesearch.com/) for providing us with a
Colab Pro account, [JarvisLabs.ai](https://cloud.jarvislabs.ai/) for GPU credits,
and also Microsoft Research for providing an
[official implementation](https://github.com/microsoft/FocalNet) of their paper.
We would also like to extend our gratitude to the first author of the
paper [Jianwei Yang](https://twitter.com/jw2yang4ai) who reviewed this tutorial
extensively.
"""
| keras-io/examples/vision/focal_modulation_network.py/0 | {
"file_path": "keras-io/examples/vision/focal_modulation_network.py",
"repo_id": "keras-io",
"token_count": 14237
} | 108 |
<jupyter_start><jupyter_text>Distilling Vision Transformers**Author:** [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2022/04/05**Last modified:** 2022/04/08**Description:** Distillation of Vision Transformers through attention. IntroductionIn the original *Vision Transformers* (ViT) paper([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929)),the authors concluded that to perform on par with Convolutional Neural Networks (CNNs),ViTs need to be pre-trained on larger datasets. The larger the better. This is mainlydue to the lack of inductive biases in the ViT architecture -- unlike CNNs,they don't have layers that exploit locality. In a follow-up paper([Steiner et al.](https://arxiv.org/abs/2106.10270)),the authors show that it is possible to substantially improve the performance of ViTswith stronger regularization and longer training.Many groups have proposed different ways to deal with the problemof data-intensiveness of ViT training.One such way was shown in the *Data-efficient image Transformers*,(DeiT) paper ([Touvron et al.](https://arxiv.org/abs/2012.12877)). Theauthors introduced a distillation technique that is specific to transformer-based visionmodels. DeiT is among the first works to show that it's possible to train ViTs wellwithout using larger datasets.In this example, we implement the distillation recipe proposed in DeiT. Thisrequires us to slightly tweak the original ViT architecture and write a custom trainingloop to implement the distillation recipe.To run the example, you'll need TensorFlow Addons, which you can install with thefollowing command:```pip install tensorflow-addons```To comfortably navigate through this example, you'll be expected to know how a ViT andknowledge distillation work. The following are good resources in case you needed arefresher:* [ViT on keras.io](https://keras.io/examples/vision/image_classification_with_vision_transformer)* [Knowledge distillation on keras.io](https://keras.io/examples/vision/knowledge_distillation/) Imports<jupyter_code>from typing import List
import tensorflow as tf
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
import tensorflow_hub as hub
from tensorflow import keras
from tensorflow.keras import layers
tfds.disable_progress_bar()
tf.keras.utils.set_random_seed(42)<jupyter_output><empty_output><jupyter_text>Constants<jupyter_code># Model
MODEL_TYPE = "deit_distilled_tiny_patch16_224"
RESOLUTION = 224
PATCH_SIZE = 16
NUM_PATCHES = (RESOLUTION // PATCH_SIZE) ** 2
LAYER_NORM_EPS = 1e-6
PROJECTION_DIM = 192
NUM_HEADS = 3
NUM_LAYERS = 12
MLP_UNITS = [
PROJECTION_DIM * 4,
PROJECTION_DIM,
]
DROPOUT_RATE = 0.0
DROP_PATH_RATE = 0.1
# Training
NUM_EPOCHS = 20
BASE_LR = 0.0005
WEIGHT_DECAY = 0.0001
# Data
BATCH_SIZE = 256
AUTO = tf.data.AUTOTUNE
NUM_CLASSES = 5<jupyter_output><empty_output><jupyter_text>You probably noticed that `DROPOUT_RATE` has been set 0.0. Dropout has been usedin the implementation to keep it complete. For smaller models (like the one used inthis example), you don't need it, but for bigger models, using dropout helps. Load the `tf_flowers` dataset and prepare preprocessing utilitiesThe authors use an array of different augmentation techniques, including MixUp([Zhang et al.](https://arxiv.org/abs/1710.09412)),RandAugment ([Cubuk et al.](https://arxiv.org/abs/1909.13719)),and so on. However, to keep the example simple to work through, we'll discard them.<jupyter_code>def preprocess_dataset(is_training=True):
def fn(image, label):
if is_training:
# Resize to a bigger spatial resolution and take the random
# crops.
image = tf.image.resize(image, (RESOLUTION + 20, RESOLUTION + 20))
image = tf.image.random_crop(image, (RESOLUTION, RESOLUTION, 3))
image = tf.image.random_flip_left_right(image)
else:
image = tf.image.resize(image, (RESOLUTION, RESOLUTION))
label = tf.one_hot(label, depth=NUM_CLASSES)
return image, label
return fn
def prepare_dataset(dataset, is_training=True):
if is_training:
dataset = dataset.shuffle(BATCH_SIZE * 10)
dataset = dataset.map(preprocess_dataset(is_training), num_parallel_calls=AUTO)
return dataset.batch(BATCH_SIZE).prefetch(AUTO)
train_dataset, val_dataset = tfds.load(
"tf_flowers", split=["train[:90%]", "train[90%:]"], as_supervised=True
)
num_train = train_dataset.cardinality()
num_val = val_dataset.cardinality()
print(f"Number of training examples: {num_train}")
print(f"Number of validation examples: {num_val}")
train_dataset = prepare_dataset(train_dataset, is_training=True)
val_dataset = prepare_dataset(val_dataset, is_training=False)<jupyter_output><empty_output><jupyter_text>Implementing the DeiT variants of ViTSince DeiT is an extension of ViT it'd make sense to first implement ViT and then extendit to support DeiT's components.First, we'll implement a layer for Stochastic Depth([Huang et al.](https://arxiv.org/abs/1603.09382))which is used in DeiT for regularization.<jupyter_code># Referred from: github.com:rwightman/pytorch-image-models.
class StochasticDepth(layers.Layer):
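    # During training, this layer drops the entire residual branch for a given
    # sample with probability `drop_prob`, and rescales the surviving
    # activations by 1 / keep_prob so that the expected value is unchanged.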
def __init__(self, drop_prop, **kwargs):
super().__init__(**kwargs)
self.drop_prob = drop_prop
def call(self, x, training=True):
if training:
keep_prob = 1 - self.drop_prob
shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
random_tensor = tf.floor(random_tensor)
return (x / keep_prob) * random_tensor
return x<jupyter_output><empty_output><jupyter_text>Now, we'll implement the MLP and Transformer blocks.<jupyter_code>def mlp(x, dropout_rate: float, hidden_units: List):
"""FFN for a Transformer block."""
# Iterate over the hidden units and
# add Dense => Dropout.
for (idx, units) in enumerate(hidden_units):
x = layers.Dense(
units,
activation=tf.nn.gelu if idx == 0 else None,
)(x)
x = layers.Dropout(dropout_rate)(x)
return x
def transformer(drop_prob: float, name: str) -> keras.Model:
"""Transformer block with pre-norm."""
num_patches = NUM_PATCHES + 2 if "distilled" in MODEL_TYPE else NUM_PATCHES + 1
encoded_patches = layers.Input((num_patches, PROJECTION_DIM))
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(encoded_patches)
# Multi Head Self Attention layer 1.
attention_output = layers.MultiHeadAttention(
num_heads=NUM_HEADS,
key_dim=PROJECTION_DIM,
dropout=DROPOUT_RATE,
)(x1, x1)
attention_output = (
StochasticDepth(drop_prob)(attention_output) if drop_prob else attention_output
)
# Skip connection 1.
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x2)
# MLP layer 1.
x4 = mlp(x3, hidden_units=MLP_UNITS, dropout_rate=DROPOUT_RATE)
x4 = StochasticDepth(drop_prob)(x4) if drop_prob else x4
# Skip connection 2.
outputs = layers.Add()([x2, x4])
return keras.Model(encoded_patches, outputs, name=name)<jupyter_output><empty_output><jupyter_text>We'll now implement a `ViTClassifier` class building on top of the components we justdeveloped. Here we'll be following the original pooling strategy used in the ViT paper --use a class token and use the feature representations corresponding to it forclassification.<jupyter_code>class ViTClassifier(keras.Model):
"""Vision Transformer base class."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Patchify + linear projection + reshaping.
self.projection = keras.Sequential(
[
layers.Conv2D(
filters=PROJECTION_DIM,
kernel_size=(PATCH_SIZE, PATCH_SIZE),
strides=(PATCH_SIZE, PATCH_SIZE),
padding="VALID",
name="conv_projection",
),
layers.Reshape(
target_shape=(NUM_PATCHES, PROJECTION_DIM),
name="flatten_projection",
),
],
name="projection",
)
# Positional embedding.
init_shape = (
1,
NUM_PATCHES + 1,
PROJECTION_DIM,
)
self.positional_embedding = tf.Variable(
tf.zeros(init_shape), name="pos_embedding"
)
# Transformer blocks.
dpr = [x for x in tf.linspace(0.0, DROP_PATH_RATE, NUM_LAYERS)]
self.transformer_blocks = [
transformer(drop_prob=dpr[i], name=f"transformer_block_{i}")
for i in range(NUM_LAYERS)
]
# CLS token.
initial_value = tf.zeros((1, 1, PROJECTION_DIM))
self.cls_token = tf.Variable(
initial_value=initial_value, trainable=True, name="cls"
)
# Other layers.
self.dropout = layers.Dropout(DROPOUT_RATE)
self.layer_norm = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)
self.head = layers.Dense(
NUM_CLASSES,
name="classification_head",
)
def call(self, inputs, training=True):
n = tf.shape(inputs)[0]
# Create patches and project the patches.
projected_patches = self.projection(inputs)
# Append class token if needed.
cls_token = tf.tile(self.cls_token, (n, 1, 1))
cls_token = tf.cast(cls_token, projected_patches.dtype)
projected_patches = tf.concat([cls_token, projected_patches], axis=1)
# Add positional embeddings to the projected patches.
encoded_patches = (
self.positional_embedding + projected_patches
) # (B, number_patches, projection_dim)
encoded_patches = self.dropout(encoded_patches)
# Iterate over the number of layers and stack up blocks of
# Transformer.
for transformer_module in self.transformer_blocks:
# Add a Transformer block.
encoded_patches = transformer_module(encoded_patches)
# Final layer normalization.
representation = self.layer_norm(encoded_patches)
# Pool representation.
encoded_patches = representation[:, 0]
# Classification head.
output = self.head(encoded_patches)
return output<jupyter_output><empty_output><jupyter_text>This class can be used standalone as ViT and is end-to-end trainable. Just remove the`distilled` phrase in `MODEL_TYPE` and it should work with `vit_tiny = ViTClassifier()`.Let's now extend it to DeiT. The following figure presents the schematic of DeiT (takenfrom the DeiT paper):Apart from the class token, DeiT has another token for distillation. During distillation,the logits corresponding to the class token are compared to the true labels, and thelogits corresponding to the distillation token are compared to the teacher's predictions.<jupyter_code>class ViTDistilled(ViTClassifier):
def __init__(self, regular_training=False, **kwargs):
super().__init__(**kwargs)
self.num_tokens = 2
self.regular_training = regular_training
# CLS and distillation tokens, positional embedding.
init_value = tf.zeros((1, 1, PROJECTION_DIM))
self.dist_token = tf.Variable(init_value, name="dist_token")
self.positional_embedding = tf.Variable(
tf.zeros(
(
1,
NUM_PATCHES + self.num_tokens,
PROJECTION_DIM,
)
),
name="pos_embedding",
)
# Head layers.
self.head = layers.Dense(
NUM_CLASSES,
name="classification_head",
)
self.head_dist = layers.Dense(
NUM_CLASSES,
name="distillation_head",
)
def call(self, inputs, training=True):
n = tf.shape(inputs)[0]
# Create patches and project the patches.
projected_patches = self.projection(inputs)
# Append the tokens.
cls_token = tf.tile(self.cls_token, (n, 1, 1))
dist_token = tf.tile(self.dist_token, (n, 1, 1))
cls_token = tf.cast(cls_token, projected_patches.dtype)
dist_token = tf.cast(dist_token, projected_patches.dtype)
projected_patches = tf.concat(
[cls_token, dist_token, projected_patches], axis=1
)
# Add positional embeddings to the projected patches.
encoded_patches = (
self.positional_embedding + projected_patches
) # (B, number_patches, projection_dim)
encoded_patches = self.dropout(encoded_patches)
# Iterate over the number of layers and stack up blocks of
# Transformer.
for transformer_module in self.transformer_blocks:
# Add a Transformer block.
encoded_patches = transformer_module(encoded_patches)
# Final layer normalization.
representation = self.layer_norm(encoded_patches)
# Classification heads.
x, x_dist = (
self.head(representation[:, 0]),
self.head_dist(representation[:, 1]),
)
if not training or self.regular_training:
# During standard train / finetune, inference average the classifier
# predictions.
return (x + x_dist) / 2
elif training:
# Only return separate classification predictions when training in distilled
# mode.
return x, x_dist<jupyter_output><empty_output><jupyter_text>Let's verify if the `ViTDistilled` class can be initialized and called as expected.<jupyter_code>deit_tiny_distilled = ViTDistilled()
dummy_inputs = tf.ones((2, 224, 224, 3))
outputs = deit_tiny_distilled(dummy_inputs, training=False)
print(outputs.shape)<jupyter_output><empty_output><jupyter_text>Implementing the trainerUnlike what happens in standard knowledge distillation([Hinton et al.](https://arxiv.org/abs/1503.02531)),where a temperature-scaled softmax is used as well as KL divergence,DeiT authors use the following loss function:Here,* CE is cross-entropy* `psi` is the softmax function* Z_s denotes student predictions* y denotes true labels* y_t denotes teacher predictions<jupyter_code>class DeiT(keras.Model):
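    # The distillation objective implemented in `train_step` below is the
    # hard-distillation loss (a reconstruction of the formula referenced in the
    # text above, whose image is not reproduced here):
    #   L = 1/2 * CE(psi(Z_cls), y) + 1/2 * CE(psi(Z_dist), y_t)
    # where Z_cls / Z_dist are the student logits from the class and
    # distillation tokens, psi is the softmax, and y_t = argmax(Z_teacher) is
    # the teacher's hard prediction.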
# Reference:
# https://keras.io/examples/vision/knowledge_distillation/
def __init__(self, student, teacher, **kwargs):
super().__init__(**kwargs)
self.student = student
self.teacher = teacher
self.student_loss_tracker = keras.metrics.Mean(name="student_loss")
self.dist_loss_tracker = keras.metrics.Mean(name="distillation_loss")
@property
def metrics(self):
metrics = super().metrics
metrics.append(self.student_loss_tracker)
metrics.append(self.dist_loss_tracker)
return metrics
def compile(
self,
optimizer,
metrics,
student_loss_fn,
distillation_loss_fn,
):
super().compile(optimizer=optimizer, metrics=metrics)
self.student_loss_fn = student_loss_fn
self.distillation_loss_fn = distillation_loss_fn
def train_step(self, data):
# Unpack data.
x, y = data
# Forward pass of teacher
teacher_predictions = tf.nn.softmax(self.teacher(x, training=False), -1)
teacher_predictions = tf.argmax(teacher_predictions, -1)
with tf.GradientTape() as tape:
# Forward pass of student.
cls_predictions, dist_predictions = self.student(x / 255.0, training=True)
# Compute losses.
student_loss = self.student_loss_fn(y, cls_predictions)
distillation_loss = self.distillation_loss_fn(
teacher_predictions, dist_predictions
)
loss = (student_loss + distillation_loss) / 2
# Compute gradients.
trainable_vars = self.student.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights.
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update the metrics configured in `compile()`.
student_predictions = (cls_predictions + dist_predictions) / 2
self.compiled_metrics.update_state(y, student_predictions)
self.dist_loss_tracker.update_state(distillation_loss)
self.student_loss_tracker.update_state(student_loss)
# Return a dict of performance.
results = {m.name: m.result() for m in self.metrics}
return results
def test_step(self, data):
# Unpack the data.
x, y = data
# Compute predictions.
y_prediction = self.student(x / 255.0, training=False)
# Calculate the loss.
student_loss = self.student_loss_fn(y, y_prediction)
# Update the metrics.
self.compiled_metrics.update_state(y, y_prediction)
self.student_loss_tracker.update_state(student_loss)
# Return a dict of performance.
results = {m.name: m.result() for m in self.metrics}
return results
def call(self, inputs):
return self.student(inputs / 255.0, training=False)<jupyter_output><empty_output><jupyter_text>Load the teacher modelThis model is based on the BiT family of ResNets([Kolesnikov et al.](https://arxiv.org/abs/1912.11370))fine-tuned on the `tf_flowers` dataset. You can refer to[this notebook](https://github.com/sayakpaul/deit-tf/blob/main/notebooks/bit-teacher.ipynb)to know how the training was performed. The teacher model has about 212 Million parameterswhich is about **40x more** than the student.<jupyter_code>!wget -q https://github.com/sayakpaul/deit-tf/releases/download/v0.1.0/bit_teacher_flowers.zip
!unzip -q bit_teacher_flowers.zip
bit_teacher_flowers = keras.models.load_model("bit_teacher_flowers")<jupyter_output><empty_output><jupyter_text>Training through distillation<jupyter_code>deit_tiny = ViTDistilled()
deit_distiller = DeiT(student=deit_tiny, teacher=bit_teacher_flowers)
lr_scaled = (BASE_LR / 512) * BATCH_SIZE
deit_distiller.compile(
optimizer=tfa.optimizers.AdamW(weight_decay=WEIGHT_DECAY, learning_rate=lr_scaled),
metrics=["accuracy"],
student_loss_fn=keras.losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=0.1
),
distillation_loss_fn=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
_ = deit_distiller.fit(train_dataset, validation_data=val_dataset, epochs=NUM_EPOCHS)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/deit.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/deit.ipynb",
"repo_id": "keras-io",
"token_count": 7554
} | 109 |
<jupyter_start><jupyter_text>Self-supervised contrastive learning with NNCLR**Author:** [Rishit Dagli](https://twitter.com/rishit_dagli)**Date created:** 2021/09/13**Last modified:** 2024/01/22**Description:** Implementation of NNCLR, a self-supervised learning method for computer vision. Introduction Self-supervised learningSelf-supervised representation learning aims to obtain robust representations of samplesfrom raw data without expensive labels or annotations. Early methods in this fieldfocused on defining pretraining tasks which involved a surrogate task on a domain with ampleweak supervision labels. Encoders trained to solve such tasks are expected tolearn general features that might be useful for other downstream tasks requiringexpensive annotations like image classification. Contrastive LearningA broad category of self-supervised learning techniques are those that use *contrastivelosses*, which have been used in a wide range of computer vision applications like[image similarity](https://www.jmlr.org/papers/v11/chechik10a.html),[dimensionality reduction (DrLIM)](http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf)and [face verification/identification](https://openaccess.thecvf.com/content_cvpr_2015/html/Schroff_FaceNet_A_Unified_2015_CVPR_paper.html).These methods learn a latent space that clusters positive samples together whilepushing apart negative samples. NNCLRIn this example, we implement NNCLR as proposed in the paper[With a Little Help from My Friends: Nearest-Neighbor Contrastive Learning of Visual Representations](https://arxiv.org/abs/2104.14548),by Google Research and DeepMind.NNCLR learns self-supervised representations that go beyond single-instance positives, whichallows for learning better features that are invariant to different viewpoints, deformations,and even intra-class variations.Clustering based methods offer a great approach to go beyond single instance positives,but assuming the entire cluster to be positives could hurt performance due to earlyover-generalization. Instead, NNCLR uses nearest neighbors in the learned representationspace as positives.In addition, NNCLR increases the performance of existing contrastive learning methods like[SimCLR](https://arxiv.org/abs/2002.05709)([Keras Example](https://keras.io/examples/vision/semisupervised_simclr))and reduces the reliance of self-supervised methods on data augmentation strategies.Here is a great visualization by the paper authors showing how NNCLR builds on ideas fromSimCLR:We can see that SimCLR uses two views of the same image as the positive pair. These twoviews, which are produced using random data augmentations, are fed through an encoder toobtain the positive embedding pair, we end up using two augmentations. NNCLR insteadkeeps a _support set_ of embeddings representing the full data distribution, and formsthe positive pairs using nearest-neighbours. A support set is used as memory duringtraining, similar to a queue (i.e. first-in-first-out) as in[MoCo](https://arxiv.org/abs/1911.05722).This example requires `tensorflow_datasets`, which canbe installed with this command:<jupyter_code>!pip install tensorflow-datasets<jupyter_output><empty_output><jupyter_text>Setup<jupyter_code>import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import keras_cv
from keras import ops
from keras import layers<jupyter_output><empty_output><jupyter_text>HyperparametersA greater `queue_size` most likely means better performance as shown in the originalpaper, but introduces significant computational overhead. The authors show that the bestresults of NNCLR are achieved with a queue size of 98,304 (the largest `queue_size` theyexperimented on). We here use 10,000 to show a working example.<jupyter_code>AUTOTUNE = tf.data.AUTOTUNE
shuffle_buffer = 5000
# The below two values are taken from https://www.tensorflow.org/datasets/catalog/stl10
labelled_train_images = 5000
unlabelled_images = 100000
temperature = 0.1
queue_size = 10000
contrastive_augmenter = {
"brightness": 0.5,
"name": "contrastive_augmenter",
"scale": (0.2, 1.0),
}
classification_augmenter = {
"brightness": 0.2,
"name": "classification_augmenter",
"scale": (0.5, 1.0),
}
input_shape = (96, 96, 3)
width = 128
num_epochs = 5 # Use 25 for better results
steps_per_epoch = 50 # Use 200 for better results<jupyter_output><empty_output><jupyter_text>Load the DatasetWe load the [STL-10](http://ai.stanford.edu/~acoates/stl10/) dataset fromTensorFlow Datasets, an image recognition dataset for developing unsupervisedfeature learning, deep learning, self-taught learning algorithms. It is inspired by theCIFAR-10 dataset, with some modifications.<jupyter_code>dataset_name = "stl10"
def prepare_dataset():
unlabeled_batch_size = unlabelled_images // steps_per_epoch
labeled_batch_size = labelled_train_images // steps_per_epoch
batch_size = unlabeled_batch_size + labeled_batch_size
unlabeled_train_dataset = (
tfds.load(
dataset_name, split="unlabelled", as_supervised=True, shuffle_files=True
)
.shuffle(buffer_size=shuffle_buffer)
.batch(unlabeled_batch_size, drop_remainder=True)
)
labeled_train_dataset = (
tfds.load(dataset_name, split="train", as_supervised=True, shuffle_files=True)
.shuffle(buffer_size=shuffle_buffer)
.batch(labeled_batch_size, drop_remainder=True)
)
test_dataset = (
tfds.load(dataset_name, split="test", as_supervised=True)
.batch(batch_size)
.prefetch(buffer_size=AUTOTUNE)
)
train_dataset = tf.data.Dataset.zip(
(unlabeled_train_dataset, labeled_train_dataset)
).prefetch(buffer_size=AUTOTUNE)
return batch_size, train_dataset, labeled_train_dataset, test_dataset
batch_size, train_dataset, labeled_train_dataset, test_dataset = prepare_dataset()<jupyter_output><empty_output><jupyter_text>AugmentationsOther self-supervised techniques like [SimCLR](https://arxiv.org/abs/2002.05709),[BYOL](https://arxiv.org/abs/2006.07733), [SwAV](https://arxiv.org/abs/2006.09882) etc.rely heavily on a well-designed data augmentation pipeline to get the best performance.However, NNCLR is _less_ dependent on complex augmentations as nearest-neighbors alreadyprovide richness in sample variations. A few common techniques often includedaugmentation pipelines are:- Random resized crops- Multiple color distortions- Gaussian blurSince NNCLR is less dependent on complex augmentations, we will only use randomcrops and random brightness for augmenting the input images. Prepare augmentation module<jupyter_code>def augmenter(brightness, name, scale):
return keras.Sequential(
[
layers.Input(shape=input_shape),
layers.Rescaling(1 / 255),
layers.RandomFlip("horizontal"),
keras_cv.layers.RandomCropAndResize(
target_size=(input_shape[0], input_shape[1]),
crop_area_factor=scale,
aspect_ratio_factor=(3 / 4, 4 / 3),
),
keras_cv.layers.RandomBrightness(factor=brightness, value_range=(0.0, 1.0)),
],
name=name,
)<jupyter_output><empty_output><jupyter_text>Encoder architectureUsing a ResNet-50 as the encoder architectureis standard in the literature. In the original paper, the authors use ResNet-50 asthe encoder architecture and spatially average the outputs of ResNet-50. However, keep inmind that more powerful models will not only increase training time but will alsorequire more memory and will limit the maximal batch size you can use. For the purpose ofthis example, we just use four convolutional layers.<jupyter_code>def encoder():
return keras.Sequential(
[
layers.Input(shape=input_shape),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Flatten(),
layers.Dense(width, activation="relu"),
],
name="encoder",
)<jupyter_output><empty_output><jupyter_text>The NNCLR model for contrastive pre-trainingWe train an encoder on unlabeled images with a contrastive loss. A nonlinear projectionhead is attached to the top of the encoder, as it improves the quality of representationsof the encoder.<jupyter_code>class NNCLR(keras.Model):
def __init__(
self,
temperature,
queue_size,
):
super().__init__()
self.probe_accuracy = keras.metrics.SparseCategoricalAccuracy()
self.correlation_accuracy = keras.metrics.SparseCategoricalAccuracy()
self.contrastive_accuracy = keras.metrics.SparseCategoricalAccuracy()
self.probe_loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
self.contrastive_augmenter = augmenter(**contrastive_augmenter)
self.classification_augmenter = augmenter(**classification_augmenter)
self.encoder = encoder()
self.projection_head = keras.Sequential(
[
layers.Input(shape=(width,)),
layers.Dense(width, activation="relu"),
layers.Dense(width),
],
name="projection_head",
)
self.linear_probe = keras.Sequential(
[layers.Input(shape=(width,)), layers.Dense(10)], name="linear_probe"
)
self.temperature = temperature
feature_dimensions = self.encoder.output_shape[1]
self.feature_queue = keras.Variable(
keras.utils.normalize(
keras.random.normal(shape=(queue_size, feature_dimensions)),
axis=1,
order=2,
),
trainable=False,
)
def compile(self, contrastive_optimizer, probe_optimizer, **kwargs):
super().compile(**kwargs)
self.contrastive_optimizer = contrastive_optimizer
self.probe_optimizer = probe_optimizer
def nearest_neighbour(self, projections):
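        # For each projection, look up its nearest neighbour in the support
        # queue and use that neighbour as the positive, while letting gradients
        # flow through the original projection (a straight-through substitution
        # via stop_gradient).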
support_similarities = ops.matmul(projections, ops.transpose(self.feature_queue))
nn_projections = ops.take(
self.feature_queue, ops.argmax(support_similarities, axis=1), axis=0
)
return projections + ops.stop_gradient(nn_projections - projections)
def update_contrastive_accuracy(self, features_1, features_2):
features_1 = keras.utils.normalize(features_1, axis=1, order=2)
features_2 = keras.utils.normalize(features_2, axis=1, order=2)
similarities = ops.matmul(features_1, ops.transpose(features_2))
batch_size = ops.shape(features_1)[0]
contrastive_labels = ops.arange(batch_size)
self.contrastive_accuracy.update_state(
ops.concatenate([contrastive_labels, contrastive_labels], axis=0),
ops.concatenate([similarities, ops.transpose(similarities)], axis=0),
)
def update_correlation_accuracy(self, features_1, features_2):
features_1 = (features_1 - ops.mean(features_1, axis=0)) / ops.std(
features_1, axis=0
)
features_2 = (features_2 - ops.mean(features_2, axis=0)) / ops.std(
features_2, axis=0
)
batch_size = ops.shape(features_1)[0]
cross_correlation = (
ops.matmul(ops.transpose(features_1), features_2) / batch_size
)
feature_dim = ops.shape(features_1)[1]
correlation_labels = ops.arange(feature_dim)
self.correlation_accuracy.update_state(
ops.concatenate([correlation_labels, correlation_labels], axis=0),
ops.concatenate(
[cross_correlation, ops.transpose(cross_correlation)], axis=0
),
)
def contrastive_loss(self, projections_1, projections_2):
projections_1 = keras.utils.normalize(projections_1, axis=1, order=2)
projections_2 = keras.utils.normalize(projections_2, axis=1, order=2)
similarities_1_2_1 = (
ops.matmul(
self.nearest_neighbour(projections_1), ops.transpose(projections_2)
)
/ self.temperature
)
similarities_1_2_2 = (
ops.matmul(
projections_2, ops.transpose(self.nearest_neighbour(projections_1))
)
/ self.temperature
)
similarities_2_1_1 = ( #
ops.matmul(
self.nearest_neighbour(projections_2), ops.transpose(projections_1)
)
/ self.temperature
)
similarities_2_1_2 = (
ops.matmul(
projections_1, ops.transpose(self.nearest_neighbour(projections_2))
)
/ self.temperature
)
batch_size = ops.shape(projections_1)[0]
contrastive_labels = ops.arange(batch_size)
loss = keras.losses.sparse_categorical_crossentropy(
ops.concatenate(
[
contrastive_labels,
contrastive_labels,
contrastive_labels,
contrastive_labels,
],
axis=0,
),
ops.concatenate(
[
similarities_1_2_1,
similarities_1_2_2,
similarities_2_1_1,
similarities_2_1_2,
],
axis=0,
),
from_logits=True,
)
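        # Update the support queue in FIFO fashion: enqueue the newest
        # projections and drop the oldest entries so the queue keeps a fixed size.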
self.feature_queue.assign(
ops.concatenate([projections_1, self.feature_queue[:-batch_size]], axis=0)
)
return loss
def train_step(self, data):
(unlabeled_images, _), (labeled_images, labels) = data
images = ops.concatenate((unlabeled_images, labeled_images), axis=0)
augmented_images_1 = self.contrastive_augmenter(images)
augmented_images_2 = self.contrastive_augmenter(images)
with tf.GradientTape() as tape:
features_1 = self.encoder(augmented_images_1)
features_2 = self.encoder(augmented_images_2)
projections_1 = self.projection_head(features_1)
projections_2 = self.projection_head(features_2)
contrastive_loss = self.contrastive_loss(projections_1, projections_2)
gradients = tape.gradient(
contrastive_loss,
self.encoder.trainable_weights + self.projection_head.trainable_weights,
)
self.contrastive_optimizer.apply_gradients(
zip(
gradients,
self.encoder.trainable_weights + self.projection_head.trainable_weights,
)
)
self.update_contrastive_accuracy(features_1, features_2)
self.update_correlation_accuracy(features_1, features_2)
preprocessed_images = self.classification_augmenter(labeled_images)
with tf.GradientTape() as tape:
features = self.encoder(preprocessed_images)
class_logits = self.linear_probe(features)
probe_loss = self.probe_loss(labels, class_logits)
gradients = tape.gradient(probe_loss, self.linear_probe.trainable_weights)
self.probe_optimizer.apply_gradients(
zip(gradients, self.linear_probe.trainable_weights)
)
self.probe_accuracy.update_state(labels, class_logits)
return {
"c_loss": contrastive_loss,
"c_acc": self.contrastive_accuracy.result(),
"r_acc": self.correlation_accuracy.result(),
"p_loss": probe_loss,
"p_acc": self.probe_accuracy.result(),
}
def test_step(self, data):
labeled_images, labels = data
preprocessed_images = self.classification_augmenter(
labeled_images, training=False
)
features = self.encoder(preprocessed_images, training=False)
class_logits = self.linear_probe(features, training=False)
probe_loss = self.probe_loss(labels, class_logits)
self.probe_accuracy.update_state(labels, class_logits)
return {"p_loss": probe_loss, "p_acc": self.probe_accuracy.result()}<jupyter_output><empty_output><jupyter_text>Pre-train NNCLRWe train the network using a `temperature` of 0.1 as suggested in the paper anda `queue_size` of 10,000 as explained earlier. We use Adam as our contrastive and probeoptimizer. For this example we train the model for only 30 epochs but it should betrained for more epochs for better performance.The following two metrics can be used for monitoring the pretraining performancewhich we also log (taken from[this Keras example](https://keras.io/examples/vision/semisupervised_simclr/selfsupervised-model-for-contrastive-pretraining)):- Contrastive accuracy: self-supervised metric, the ratio of cases in which therepresentation of an image is more similar to its differently augmented version's one,than to the representation of any other image in the current batch. Self-supervisedmetrics can be used for hyperparameter tuning even in the case when there are no labeledexamples.- Linear probing accuracy: linear probing is a popular metric to evaluate self-supervisedclassifiers. It is computed as the accuracy of a logistic regression classifier trainedon top of the encoder's features. In our case, this is done by training a single denselayer on top of the frozen encoder. Note that contrary to traditional approach where theclassifier is trained after the pretraining phase, in this example we train it duringpretraining. This might slightly decrease its accuracy, but that way we can monitor itsvalue during training, which helps with experimentation and debugging.<jupyter_code>model = NNCLR(temperature=temperature, queue_size=queue_size)
model.compile(
contrastive_optimizer=keras.optimizers.Adam(),
probe_optimizer=keras.optimizers.Adam(),
jit_compile=False,
)
pretrain_history = model.fit(
train_dataset, epochs=num_epochs, validation_data=test_dataset
)<jupyter_output><empty_output><jupyter_text>Evaluate our modelA popular way to evaluate a SSL method in computer vision or for that fact any otherpre-training method as such is to learn a linear classifier on the frozen features of thetrained backbone model and evaluate the classifier on unseen images. Other methods ofteninclude fine-tuning on the source dataset or even a target dataset with 5% or 10% labelspresent. You can use the backbone we just trained for any downstream task such as imageclassification (like we do here) or segmentation or detection, where the backbone modelsare usually pre-trained with supervised learning.<jupyter_code>finetuning_model = keras.Sequential(
[
layers.Input(shape=input_shape),
augmenter(**classification_augmenter),
model.encoder,
layers.Dense(10),
],
name="finetuning_model",
)
finetuning_model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
jit_compile=False,
)
finetuning_history = finetuning_model.fit(
labeled_train_dataset, epochs=num_epochs, validation_data=test_dataset
)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/nnclr.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/nnclr.ipynb",
"repo_id": "keras-io",
"token_count": 7497
} | 110 |
# Image classification with ConvMixer
**Author:** [Sayak Paul](https://twitter.com/RisingSayak)<br>
**Date created:** 2021/10/12<br>
**Last modified:** 2021/10/12<br>
**Description:** An all-convolutional network applied to patches of images.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/convmixer.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/convmixer.py)
---
## Introduction
Vision Transformers (ViT; [Dosovitskiy et al.](https://arxiv.org/abs/2010.11929)) extract
small patches from the input images, linearly project them, and then apply the
Transformer ([Vaswani et al.](https://arxiv.org/abs/1706.03762)) blocks. The application
of ViTs to image recognition tasks is quickly becoming a promising area of research,
because ViTs eliminate the need to have strong inductive biases (such as convolutions) for
modeling locality. This presents them as a general computation primitive capable of
learning just from the training data with as minimal inductive priors as possible. ViTs
yield great downstream performance when trained with proper regularization, data
augmentation, and relatively large datasets.
In the [Patches Are All You Need](https://openreview.net/pdf?id=TVHS5Y4dNvM) paper (note:
at the time of writing, it is a submission to the ICLR 2022 conference), the authors extend
the idea of using patches to train an all-convolutional network and demonstrate
competitive results. Their architecture, namely **ConvMixer**, uses recipes from recent
isotropic architectures like ViT, MLP-Mixer
([Tolstikhin et al.](https://arxiv.org/abs/2105.01601)), such as using the same
depth and resolution across different layers in the network, residual connections,
and so on.
In this example, we will implement the ConvMixer model and demonstrate its performance on
the CIFAR-10 dataset.
---
## Imports
```python
import keras
from keras import layers
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
```
---
## Hyperparameters
To keep run time short, we will train the model for only 10 epochs. To focus on
the core ideas of ConvMixer, we will not use other training-specific elements like
RandAugment ([Cubuk et al.](https://arxiv.org/abs/1909.13719)). If you are interested in
learning more about those details, please refer to the
[original paper](https://openreview.net/pdf?id=TVHS5Y4dNvM).
```python
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 128
num_epochs = 10
```
---
## Load the CIFAR-10 dataset
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
val_split = 0.1
val_indices = int(len(x_train) * val_split)
new_x_train, new_y_train = x_train[val_indices:], y_train[val_indices:]
x_val, y_val = x_train[:val_indices], y_train[:val_indices]
print(f"Training data samples: {len(new_x_train)}")
print(f"Validation data samples: {len(x_val)}")
print(f"Test data samples: {len(x_test)}")
```
<div class="k-default-codeblock">
```
Training data samples: 45000
Validation data samples: 5000
Test data samples: 10000
```
</div>
---
## Prepare `tf.data.Dataset` objects
Our data augmentation pipeline is different from what the authors used for the CIFAR-10
dataset, which is fine for the purpose of the example.
Note that it's OK to use **TF APIs for data I/O and preprocessing** with other backends
(JAX, PyTorch), as `tf.data` is a feature-complete framework when it comes to data preprocessing.
```python
image_size = 32
auto = tf.data.AUTOTUNE
augmentation_layers = [
keras.layers.RandomCrop(image_size, image_size),
keras.layers.RandomFlip("horizontal"),
]
def augment_images(images):
for layer in augmentation_layers:
images = layer(images, training=True)
return images
def make_datasets(images, labels, is_train=False):
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
if is_train:
dataset = dataset.shuffle(batch_size * 10)
dataset = dataset.batch(batch_size)
if is_train:
dataset = dataset.map(
lambda x, y: (augment_images(x), y), num_parallel_calls=auto
)
return dataset.prefetch(auto)
train_dataset = make_datasets(new_x_train, new_y_train, is_train=True)
val_dataset = make_datasets(x_val, y_val)
test_dataset = make_datasets(x_test, y_test)
```
---
## ConvMixer utilities
The following figure (taken from the original paper) depicts the ConvMixer model:

ConvMixer is very similar to the MLP-Mixer model, with the following key
differences:
* Instead of using fully-connected layers, it uses standard convolution layers.
* Instead of LayerNorm (which is typical for ViTs and MLP-Mixers), it uses BatchNorm.
Two types of convolution layers are used in ConvMixer. **(1)**: Depthwise convolutions,
for mixing spatial locations of the images, **(2)**: Pointwise convolutions (which follow
the depthwise convolutions), for mixing channel-wise information across the patches.
Another key point is the use of *larger kernel sizes* to allow a larger receptive field.
```python
def activation_block(x):
x = layers.Activation("gelu")(x)
return layers.BatchNormalization()(x)
def conv_stem(x, filters: int, patch_size: int):
x = layers.Conv2D(filters, kernel_size=patch_size, strides=patch_size)(x)
return activation_block(x)
def conv_mixer_block(x, filters: int, kernel_size: int):
# Depthwise convolution.
x0 = x
x = layers.DepthwiseConv2D(kernel_size=kernel_size, padding="same")(x)
x = layers.Add()([activation_block(x), x0]) # Residual.
# Pointwise convolution.
x = layers.Conv2D(filters, kernel_size=1)(x)
x = activation_block(x)
return x
def get_conv_mixer_256_8(
image_size=32, filters=256, depth=8, kernel_size=5, patch_size=2, num_classes=10
):
"""ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM.
The hyperparameter values are taken from the paper.
"""
inputs = keras.Input((image_size, image_size, 3))
x = layers.Rescaling(scale=1.0 / 255)(inputs)
# Extract patch embeddings.
x = conv_stem(x, filters, patch_size)
# ConvMixer blocks.
for _ in range(depth):
x = conv_mixer_block(x, filters, kernel_size)
# Classification block.
x = layers.GlobalAvgPool2D()(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
return keras.Model(inputs, outputs)
```
The model used in this experiment is termed **ConvMixer-256/8**, where 256 denotes the
number of channels and 8 denotes the depth. The resulting model only has 0.8 million
parameters.
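If you want to verify that figure yourself, a quick, purely illustrative way is to
instantiate the model and print its summary:
```python
# Illustrative check of the parameter count reported above.
get_conv_mixer_256_8().summary()
```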
---
## Model training and evaluation utility
```python
# Code reference:
# https://keras.io/examples/vision/image_classification_with_vision_transformer/.
def run_experiment(model):
optimizer = keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
)
model.compile(
optimizer=optimizer,
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
checkpoint_filepath = "/tmp/checkpoint.keras"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_accuracy",
save_best_only=True,
save_weights_only=False,
)
history = model.fit(
train_dataset,
validation_data=val_dataset,
epochs=num_epochs,
callbacks=[checkpoint_callback],
)
model.load_weights(checkpoint_filepath)
_, accuracy = model.evaluate(test_dataset)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
return history, model
```
---
## Train and evaluate model
```python
conv_mixer_model = get_conv_mixer_256_8()
history, conv_mixer_model = run_experiment(conv_mixer_model)
```
<div class="k-default-codeblock">
```
Epoch 1/10
352/352 ━━━━━━━━━━━━━━━━━━━━ 46s 103ms/step - accuracy: 0.4594 - loss: 1.4780 - val_accuracy: 0.1536 - val_loss: 4.0766
Epoch 2/10
352/352 ━━━━━━━━━━━━━━━━━━━━ 14s 39ms/step - accuracy: 0.6996 - loss: 0.8479 - val_accuracy: 0.7240 - val_loss: 0.7926
Epoch 3/10
352/352 ━━━━━━━━━━━━━━━━━━━━ 14s 39ms/step - accuracy: 0.7823 - loss: 0.6287 - val_accuracy: 0.7800 - val_loss: 0.6532
Epoch 4/10
352/352 ━━━━━━━━━━━━━━━━━━━━ 14s 39ms/step - accuracy: 0.8264 - loss: 0.5003 - val_accuracy: 0.8074 - val_loss: 0.5895
Epoch 5/10
352/352 ━━━━━━━━━━━━━━━━━━━━ 21s 60ms/step - accuracy: 0.8605 - loss: 0.4092 - val_accuracy: 0.7996 - val_loss: 0.6037
Epoch 6/10
352/352 ━━━━━━━━━━━━━━━━━━━━ 13s 38ms/step - accuracy: 0.8788 - loss: 0.3527 - val_accuracy: 0.8072 - val_loss: 0.6162
Epoch 7/10
352/352 ━━━━━━━━━━━━━━━━━━━━ 21s 61ms/step - accuracy: 0.8972 - loss: 0.2984 - val_accuracy: 0.8226 - val_loss: 0.5604
Epoch 8/10
352/352 ━━━━━━━━━━━━━━━━━━━━ 21s 61ms/step - accuracy: 0.9087 - loss: 0.2608 - val_accuracy: 0.8310 - val_loss: 0.5303
Epoch 9/10
352/352 ━━━━━━━━━━━━━━━━━━━━ 14s 39ms/step - accuracy: 0.9176 - loss: 0.2302 - val_accuracy: 0.8458 - val_loss: 0.5051
Epoch 10/10
352/352 ━━━━━━━━━━━━━━━━━━━━ 14s 38ms/step - accuracy: 0.9336 - loss: 0.1918 - val_accuracy: 0.8316 - val_loss: 0.5848
79/79 ━━━━━━━━━━━━━━━━━━━━ 3s 32ms/step - accuracy: 0.8371 - loss: 0.5501
Test accuracy: 83.69%
```
</div>
The gap in training and validation performance can be mitigated by using additional
regularization techniques. Nevertheless, being able to get to ~83% accuracy within 10
epochs with 0.8 million parameters is a strong result.
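For instance, one could extend the `augmentation_layers` list defined earlier with a few
more Keras preprocessing layers. The snippet below is only a sketch with untuned,
illustrative values:
```python
# Sketch: stronger augmentation for extra regularization (values are not tuned).
augmentation_layers += [
    keras.layers.RandomZoom(0.1),
    keras.layers.RandomRotation(0.05),
]
```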
---
## Visualizing the internals of ConvMixer
We can visualize the patch embeddings and the learned convolution filters. Recall
that each patch embedding and intermediate feature map have the same number of channels
(256 in this case). This will make our visualization utility easier to implement.
```python
# Code reference: https://bit.ly/3awIRbP.
def visualization_plot(weights, idx=1):
# First, apply min-max normalization to the
    # given weights to avoid isotropic scaling.
p_min, p_max = weights.min(), weights.max()
weights = (weights - p_min) / (p_max - p_min)
# Visualize all the filters.
num_filters = 256
plt.figure(figsize=(8, 8))
for i in range(num_filters):
current_weight = weights[:, :, :, i]
if current_weight.shape[-1] == 1:
current_weight = current_weight.squeeze()
ax = plt.subplot(16, 16, idx)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(current_weight)
idx += 1
# We first visualize the learned patch embeddings.
patch_embeddings = conv_mixer_model.layers[2].get_weights()[0]
visualization_plot(patch_embeddings)
```

Even though we did not train the network to convergence, we can notice that different
patches show different patterns. Some are similar to one another, while others are very
different. These visualizations are more salient with larger image sizes.
Similarly, we can visualize the raw convolution kernels. This can help us understand
the patterns to which a given kernel is receptive.
```python
# First, print the indices of the convolution layers that are not
# pointwise convolutions.
for i, layer in enumerate(conv_mixer_model.layers):
if isinstance(layer, layers.DepthwiseConv2D):
if layer.get_config()["kernel_size"] == (5, 5):
print(i, layer)
idx = 26 # Taking a kernel from the middle of the network.
kernel = conv_mixer_model.layers[idx].get_weights()[0]
kernel = np.expand_dims(kernel.squeeze(), axis=2)
visualization_plot(kernel)
```
<div class="k-default-codeblock">
```
5 <DepthwiseConv2D name=depthwise_conv2d, built=True>
12 <DepthwiseConv2D name=depthwise_conv2d_1, built=True>
19 <DepthwiseConv2D name=depthwise_conv2d_2, built=True>
26 <DepthwiseConv2D name=depthwise_conv2d_3, built=True>
33 <DepthwiseConv2D name=depthwise_conv2d_4, built=True>
40 <DepthwiseConv2D name=depthwise_conv2d_5, built=True>
47 <DepthwiseConv2D name=depthwise_conv2d_6, built=True>
54 <DepthwiseConv2D name=depthwise_conv2d_7, built=True>
```
</div>

We see that different filters in the kernel have different locality spans, and this pattern
is likely to evolve with more training.
---
## Final notes
There has been a recent trend of fusing convolutions with other data-agnostic operations
like self-attention. The following works are along this line of research:
* ConViT ([d'Ascoli et al.](https://arxiv.org/abs/2103.10697))
* CCT ([Hassani et al.](https://arxiv.org/abs/2104.05704))
* CoAtNet ([Dai et al.](https://arxiv.org/abs/2106.04803))
| keras-io/examples/vision/md/convmixer.md/0 | {
"file_path": "keras-io/examples/vision/md/convmixer.md",
"repo_id": "keras-io",
"token_count": 4781
} | 111 |
# Image classification from scratch
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2020/04/27<br>
**Last modified:** 2023/11/09<br>
**Description:** Training an image classifier from scratch on the Kaggle Cats vs Dogs dataset.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/image_classification_from_scratch.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/image_classification_from_scratch.py)
---
## Introduction
This example shows how to do image classification from scratch, starting from JPEG
image files on disk, without leveraging pre-trained weights or a pre-made Keras
Application model. We demonstrate the workflow on the Kaggle Cats vs Dogs binary
classification dataset.
We use the `image_dataset_from_directory` utility to generate the datasets, and
we use Keras image preprocessing layers for image standardization and data augmentation.
---
## Setup
```python
import os
import numpy as np
import keras
from keras import layers
from tensorflow import data as tf_data
import matplotlib.pyplot as plt
```
---
## Load the data: the Cats vs Dogs dataset
### Raw data download
First, let's download the 786M ZIP archive of the raw data:
```python
!curl -O https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_5340.zip
```
```python
!unzip -q kagglecatsanddogs_5340.zip
!ls
```
<div class="k-default-codeblock">
```
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 786M 100 786M 0 0 11.1M 0 0:01:10 0:01:10 --:--:-- 11.8M
CDLA-Permissive-2.0.pdf kagglecatsanddogs_5340.zip
PetImages 'readme[1].txt'
image_classification_from_scratch.ipynb
```
</div>
Now we have a `PetImages` folder which contains two subfolders, `Cat` and `Dog`. Each
subfolder contains image files for its category.
```python
!ls PetImages
```
<div class="k-default-codeblock">
```
Cat Dog
```
</div>
### Filter out corrupted images
When working with lots of real-world image data, corrupted images are a common
occurrence. Let's filter out badly-encoded images that do not feature the string "JFIF"
in their header.
```python
num_skipped = 0
for folder_name in ("Cat", "Dog"):
folder_path = os.path.join("PetImages", folder_name)
for fname in os.listdir(folder_path):
fpath = os.path.join(folder_path, fname)
try:
fobj = open(fpath, "rb")
is_jfif = b"JFIF" in fobj.peek(10)
finally:
fobj.close()
if not is_jfif:
num_skipped += 1
# Delete corrupted image
os.remove(fpath)
print(f"Deleted {num_skipped} images.")
```
<div class="k-default-codeblock">
```
Deleted 1590 images.
```
</div>
---
## Generate a `Dataset`
```python
image_size = (180, 180)
batch_size = 128
train_ds, val_ds = keras.utils.image_dataset_from_directory(
"PetImages",
validation_split=0.2,
subset="both",
seed=1337,
image_size=image_size,
batch_size=batch_size,
)
```
<div class="k-default-codeblock">
```
Found 23410 files belonging to 2 classes.
Using 18728 files for training.
Using 4682 files for validation.
```
</div>
---
## Visualize the data
Here are the first 9 images in the training dataset.
```python
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(np.array(images[i]).astype("uint8"))
plt.title(int(labels[i]))
plt.axis("off")
```

---
## Using image data augmentation
When you don't have a large image dataset, it's a good practice to artificially
introduce sample diversity by applying random yet realistic transformations to the
training images, such as random horizontal flipping or small random rotations. This
helps expose the model to different aspects of the training data while slowing down
overfitting.
```python
data_augmentation_layers = [
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
]
def data_augmentation(images):
for layer in data_augmentation_layers:
images = layer(images)
return images
```
Let's visualize what the augmented samples look like, by applying `data_augmentation`
repeatedly to the first few images in the dataset:
```python
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
for i in range(9):
augmented_images = data_augmentation(images)
ax = plt.subplot(3, 3, i + 1)
plt.imshow(np.array(augmented_images[0]).astype("uint8"))
plt.axis("off")
```

---
## Standardizing the data
Our images are already of a standard size (180x180), as they are being yielded as
contiguous `float32` batches by our dataset. However, their RGB channel values are in
the `[0, 255]` range. This is not ideal for a neural network;
in general you should seek to make your input values small. Here, we will
standardize values to be in the `[0, 1]` range by using a `Rescaling` layer at the start of
our model.
---
## Two options to preprocess the data
There are two ways you could be using the `data_augmentation` preprocessor:
**Option 1: Make it part of the model**, like this:
```python
inputs = keras.Input(shape=input_shape)
x = data_augmentation(inputs)
x = layers.Rescaling(1./255)(x)
... # Rest of the model
```
With this option, your data augmentation will happen *on device*, synchronously
with the rest of the model execution, meaning that it will benefit from GPU
acceleration.
Note that data augmentation is inactive at test time, so the input samples will only be
augmented during `fit()`, not when calling `evaluate()` or `predict()`.
If you're training on GPU, this may be a good option.
**Option 2: apply it to the dataset**, so as to obtain a dataset that yields batches of
augmented images, like this:
```python
augmented_train_ds = train_ds.map(
lambda x, y: (data_augmentation(x, training=True), y))
```
With this option, your data augmentation will happen **on CPU**, asynchronously, and will
be buffered before going into the model.
If you're training on CPU, this is the better option, since it makes data augmentation
asynchronous and non-blocking.
In our case, we'll go with the second option. If you're not sure
which one to pick, this second option (asynchronous preprocessing) is always a solid choice.
---
## Configure the dataset for performance
Let's apply data augmentation to our training dataset,
and let's make sure to use buffered prefetching, so we can yield data from disk without
having I/O become blocking:
```python
# Apply `data_augmentation` to the training images.
train_ds = train_ds.map(
lambda img, label: (data_augmentation(img), label),
num_parallel_calls=tf_data.AUTOTUNE,
)
# Prefetching samples in GPU memory helps maximize GPU utilization.
train_ds = train_ds.prefetch(tf_data.AUTOTUNE)
val_ds = val_ds.prefetch(tf_data.AUTOTUNE)
```
---
## Build a model
We'll build a small version of the Xception network. We haven't particularly tried to
optimize the architecture; if you want to do a systematic search for the best model
configuration, consider using
[KerasTuner](https://github.com/keras-team/keras-tuner).
Note that:
- We start the model with the `data_augmentation` preprocessor, followed by a
`Rescaling` layer.
- We include a `Dropout` layer before the final classification layer.
```python
def make_model(input_shape, num_classes):
inputs = keras.Input(shape=input_shape)
# Entry block
x = layers.Rescaling(1.0 / 255)(inputs)
x = layers.Conv2D(128, 3, strides=2, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
previous_block_activation = x # Set aside residual
for size in [256, 512, 728]:
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
# Project residual
residual = layers.Conv2D(size, 1, strides=2, padding="same")(
previous_block_activation
)
x = layers.add([x, residual]) # Add back residual
previous_block_activation = x # Set aside next residual
x = layers.SeparableConv2D(1024, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.GlobalAveragePooling2D()(x)
if num_classes == 2:
units = 1
else:
units = num_classes
x = layers.Dropout(0.25)(x)
# We specify activation=None so as to return logits
outputs = layers.Dense(units, activation=None)(x)
return keras.Model(inputs, outputs)
model = make_model(input_shape=image_size + (3,), num_classes=2)
keras.utils.plot_model(model, show_shapes=True)
```

---
## Train the model
```python
epochs = 25
callbacks = [
keras.callbacks.ModelCheckpoint("save_at_{epoch}.keras"),
]
model.compile(
optimizer=keras.optimizers.Adam(3e-4),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy(name="acc")],
)
model.fit(
train_ds,
epochs=epochs,
callbacks=callbacks,
validation_data=val_ds,
)
```
<div class="k-default-codeblock">
```
Epoch 1/25
...
Epoch 25/25
147/147 ━━━━━━━━━━━━━━━━━━━━ 53s 354ms/step - acc: 0.9638 - loss: 0.0903 - val_acc: 0.9382 - val_loss: 0.1542
<keras.src.callbacks.history.History at 0x7f41003c24a0>
```
</div>
We get to >90% validation accuracy after training for 25 epochs on the full dataset
(in practice, you can train for 50+ epochs before validation performance starts degrading).
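If you plan to train for many more epochs, one option (a sketch, not part of this example's
original run) is to add an `EarlyStopping` callback next to the checkpoint callback, so that
training stops once validation accuracy stops improving:
```python
callbacks = [
    keras.callbacks.ModelCheckpoint("save_at_{epoch}.keras"),
    # Hypothetical addition: stop after `val_acc` has not improved for 5 epochs.
    keras.callbacks.EarlyStopping(monitor="val_acc", patience=5, restore_best_weights=True),
]
```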
---
## Run inference on new data
Note that data augmentation and dropout are inactive at inference time.
```python
img = keras.utils.load_img("PetImages/Cat/6779.jpg", target_size=image_size)
plt.imshow(img)
img_array = keras.utils.img_to_array(img)
img_array = keras.ops.expand_dims(img_array, 0) # Create batch axis
predictions = model.predict(img_array)
score = float(keras.ops.sigmoid(predictions[0][0]))
print(f"This image is {100 * (1 - score):.2f}% cat and {100 * score:.2f}% dog.")
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 2s 2s/step
This image is 94.30% cat and 5.70% dog.
```
</div>

| keras-io/examples/vision/md/image_classification_from_scratch.md/0 | {
"file_path": "keras-io/examples/vision/md/image_classification_from_scratch.md",
"repo_id": "keras-io",
"token_count": 4166
} | 112 |
# Pneumonia Classification on TPU
**Author:** Amy MiHyun Jang<br>
**Date created:** 2020/07/28<br>
**Last modified:** 2024/02/12<br>
**Description:** Medical image classification on TPU.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/xray_classification_with_tpus.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/xray_classification_with_tpus.py)
---
## Introduction + Set-up
This tutorial will explain how to build an X-ray image classification model
to predict whether an X-ray scan shows presence of pneumonia.
```python
import re
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
print("Device:", tpu.master())
strategy = tf.distribute.TPUStrategy(tpu)
except:
strategy = tf.distribute.get_strategy()
print("Number of replicas:", strategy.num_replicas_in_sync)
```
<div class="k-default-codeblock">
```
Device: grpc://10.0.27.122:8470
INFO:tensorflow:Initializing the TPU system: grpc://10.0.27.122:8470
INFO:tensorflow:Initializing the TPU system: grpc://10.0.27.122:8470
INFO:tensorflow:Clearing out eager caches
INFO:tensorflow:Clearing out eager caches
INFO:tensorflow:Finished initializing TPU system.
INFO:tensorflow:Finished initializing TPU system.
WARNING:absl:`tf.distribute.TPUStrategy` is deprecated, please use the non experimental symbol `tf.distribute.TPUStrategy` instead.
INFO:tensorflow:Found TPU system:
INFO:tensorflow:Found TPU system:
INFO:tensorflow:*** Num TPU Cores: 8
INFO:tensorflow:*** Num TPU Cores: 8
INFO:tensorflow:*** Num TPU Workers: 1
INFO:tensorflow:*** Num TPU Workers: 1
INFO:tensorflow:*** Num TPU Cores Per Worker: 8
INFO:tensorflow:*** Num TPU Cores Per Worker: 8
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:localhost/replica:0/task:0/device:CPU:0, CPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:localhost/replica:0/task:0/device:CPU:0, CPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:localhost/replica:0/task:0/device:XLA_CPU:0, XLA_CPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:localhost/replica:0/task:0/device:XLA_CPU:0, XLA_CPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:CPU:0, CPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:CPU:0, CPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:0, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:0, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:1, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:1, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:2, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:2, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:3, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:3, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:4, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:4, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:5, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:5, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:6, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:6, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:7, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:7, TPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU_SYSTEM:0, TPU_SYSTEM, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU_SYSTEM:0, TPU_SYSTEM, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:XLA_CPU:0, XLA_CPU, 0, 0)
INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:XLA_CPU:0, XLA_CPU, 0, 0)
Number of replicas: 8
```
</div>
We need a Google Cloud link to our data in order to load it using a TPU.
Below, we define key configuration parameters we'll use in this example.
To run on TPU, this example must be on Colab with the TPU runtime selected.
```python
AUTOTUNE = tf.data.AUTOTUNE
BATCH_SIZE = 25 * strategy.num_replicas_in_sync
IMAGE_SIZE = [180, 180]
CLASS_NAMES = ["NORMAL", "PNEUMONIA"]
```
---
## Load the data
The Chest X-ray data we are using from
[*Cell*](https://www.cell.com/cell/fulltext/S0092-8674(18)30154-5) divides the data into
training and test files. Let's first load in the training TFRecords.
```python
train_images = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/train/images.tfrec"
)
train_paths = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/train/paths.tfrec"
)
ds = tf.data.Dataset.zip((train_images, train_paths))
```
Let's count how many healthy/normal chest X-rays we have and how many
pneumonia chest X-rays we have:
```python
COUNT_NORMAL = len(
[
filename
for filename in train_paths
if "NORMAL" in filename.numpy().decode("utf-8")
]
)
print("Normal images count in training set: " + str(COUNT_NORMAL))
COUNT_PNEUMONIA = len(
[
filename
for filename in train_paths
if "PNEUMONIA" in filename.numpy().decode("utf-8")
]
)
print("Pneumonia images count in training set: " + str(COUNT_PNEUMONIA))
```
<div class="k-default-codeblock">
```
Normal images count in training set: 1349
Pneumonia images count in training set: 3883
```
</div>
Notice that there are far more images classified as pneumonia than normal. This
shows that we have an imbalance in our data. We will correct for this imbalance later on
in our notebook.
We want to map each filename to the corresponding (image, label) pair. The following
methods will help us do that.
As we only have two labels, we will encode the label so that `1` or `True` indicates
pneumonia and `0` or `False` indicates normal.
```python
def get_label(file_path):
# convert the path to a list of path components
parts = tf.strings.split(file_path, "/")
# The second to last is the class-directory
if parts[-2] == "PNEUMONIA":
return 1
else:
return 0
def decode_img(img):
# convert the compressed string to a 3D uint8 tensor
img = tf.image.decode_jpeg(img, channels=3)
# resize the image to the desired size.
return tf.image.resize(img, IMAGE_SIZE)
def process_path(image, path):
label = get_label(path)
# load the raw data from the file as a string
img = decode_img(image)
return img, label
ds = ds.map(process_path, num_parallel_calls=AUTOTUNE)
```
Let's split the data into training and validation datasets.
```python
ds = ds.shuffle(10000)
train_ds = ds.take(4200)
val_ds = ds.skip(4200)
```
Let's visualize the shape of an (image, label) pair.
```python
for image, label in train_ds.take(1):
print("Image shape: ", image.numpy().shape)
print("Label: ", label.numpy())
```
<div class="k-default-codeblock">
```
Image shape: (180, 180, 3)
Label: False
```
</div>
Load and format the test data as well.
```python
test_images = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/test/images.tfrec"
)
test_paths = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/test/paths.tfrec"
)
test_ds = tf.data.Dataset.zip((test_images, test_paths))
test_ds = test_ds.map(process_path, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.batch(BATCH_SIZE)
```
---
## Visualize the dataset
First, let's use buffered prefetching so we can yield data from disk without having I/O
become blocking.
Please note that large image datasets should not be cached in memory. We do it here
because the dataset is not very large and we want to train on TPU.
```python
def prepare_for_training(ds, cache=True):
# This is a small dataset, only load it once, and keep it in memory.
# use `.cache(filename)` to cache preprocessing work for datasets that don't
# fit in memory.
if cache:
if isinstance(cache, str):
ds = ds.cache(cache)
else:
ds = ds.cache()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model
# is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
return ds
```
Call the next batch iteration of the training data.
```python
train_ds = prepare_for_training(train_ds)
val_ds = prepare_for_training(val_ds)
image_batch, label_batch = next(iter(train_ds))
```
Define the method to show the images in the batch.
```python
def show_batch(image_batch, label_batch):
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(image_batch[n] / 255)
if label_batch[n]:
plt.title("PNEUMONIA")
else:
plt.title("NORMAL")
plt.axis("off")
```
As the method takes NumPy arrays as its parameters, call the `numpy()` method on the
batches to convert the tensors to NumPy arrays.
```python
show_batch(image_batch.numpy(), label_batch.numpy())
```

---
## Build the CNN
To make our model more modular and easier to understand, let's define some blocks. As
we're building a convolutional neural network, we'll create a convolution block and a dense
layer block.
The architecture for this CNN has been inspired by this
[article](https://towardsdatascience.com/deep-learning-for-detecting-pneumonia-from-x-ray-images-fc9a3d9fdba8).
```python
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras
from keras import layers
def conv_block(filters, inputs):
x = layers.SeparableConv2D(filters, 3, activation="relu", padding="same")(inputs)
x = layers.SeparableConv2D(filters, 3, activation="relu", padding="same")(x)
x = layers.BatchNormalization()(x)
outputs = layers.MaxPool2D()(x)
return outputs
def dense_block(units, dropout_rate, inputs):
x = layers.Dense(units, activation="relu")(inputs)
x = layers.BatchNormalization()(x)
outputs = layers.Dropout(dropout_rate)(x)
return outputs
```
The following function builds our model for us.
The images originally have values in the `[0, 255]` range. CNNs work better with smaller
numbers, so we will scale the input down.
The Dropout layers are important, as they
reduce the likelihood of the model overfitting. We want to end the model with a `Dense`
layer with one node, as this will be the binary output that determines whether an X-ray shows
presence of pneumonia.
```python
def build_model():
inputs = keras.Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
x = layers.Rescaling(1.0 / 255)(inputs)
x = layers.Conv2D(16, 3, activation="relu", padding="same")(x)
x = layers.Conv2D(16, 3, activation="relu", padding="same")(x)
x = layers.MaxPool2D()(x)
x = conv_block(32, x)
x = conv_block(64, x)
x = conv_block(128, x)
x = layers.Dropout(0.2)(x)
x = conv_block(256, x)
x = layers.Dropout(0.2)(x)
x = layers.Flatten()(x)
x = dense_block(512, 0.7, x)
x = dense_block(128, 0.5, x)
x = dense_block(64, 0.3, x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
```
---
## Correct for data imbalance
We saw earlier in this example that the data was imbalanced, with more images classified
as pneumonia than normal. We will correct for that by using class weighting:
```python
initial_bias = np.log([COUNT_PNEUMONIA / COUNT_NORMAL])
print("Initial bias: {:.5f}".format(initial_bias[0]))
TRAIN_IMG_COUNT = COUNT_NORMAL + COUNT_PNEUMONIA
weight_for_0 = (1 / COUNT_NORMAL) * (TRAIN_IMG_COUNT) / 2.0
weight_for_1 = (1 / COUNT_PNEUMONIA) * (TRAIN_IMG_COUNT) / 2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
print("Weight for class 0: {:.2f}".format(weight_for_0))
print("Weight for class 1: {:.2f}".format(weight_for_1))
```
<div class="k-default-codeblock">
```
Initial bias: 1.05724
Weight for class 0: 1.94
Weight for class 1: 0.67
```
</div>
The weight for class `0` (Normal) is a lot higher than the weight for class `1`
(Pneumonia). Because there are fewer normal images, each normal image will be weighted
more to balance the data, as the CNN works best when the training data is balanced.
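As a quick sanity check (not part of the original notebook), note that the two weights are
chosen so that each class contributes the same total weight to the loss:
```python
# Each product equals TRAIN_IMG_COUNT / 2 == 2616.0 for the counts above.
print(weight_for_0 * COUNT_NORMAL)
print(weight_for_1 * COUNT_PNEUMONIA)
```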
---
## Train the model
### Defining callbacks
The checkpoint callback saves the best weights of the model, so next time we want to use
the model, we do not have to spend time training it. The early stopping callback stops
the training process when the model starts becoming stagnant, or even worse, when the
model starts overfitting.
```python
checkpoint_cb = keras.callbacks.ModelCheckpoint("xray_model.keras", save_best_only=True)
early_stopping_cb = keras.callbacks.EarlyStopping(
patience=10, restore_best_weights=True
)
```
We also want to tune our learning rate. Too high a learning rate will cause the model
to diverge, while too small a learning rate will make training too slow. We
implement the exponential learning rate scheduling method below.
```python
initial_learning_rate = 0.015
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
```
### Fit the model
For our metrics, we want to include precision and recall, as they will provide us with a
more informed picture of how good our model is. Accuracy tells us what fraction of the
labels is correct. Since our data is not balanced, accuracy might give a skewed sense of
a good model (i.e. a model that always predicts PNEUMONIA will be 74% accurate but is not
a good model).
Precision is the number of true positives (TP) over the sum of TP and false positives
(FP). It shows what fraction of predicted positives are actually correct.
Recall is the number of TP over the sum of TP and false negatives (FN). It shows what
fraction of actual positives are correctly identified.
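In code, with hypothetical confusion-matrix counts (not taken from this model), the two
metrics look like this:
```python
# Hypothetical counts, for illustration only.
tp, fp, fn = 380, 45, 10
precision = tp / (tp + fp)  # fraction of predicted positives that are truly positive
recall = tp / (tp + fn)  # fraction of actual positives that are detected
print(f"precision={precision:.2f}, recall={recall:.2f}")  # precision=0.89, recall=0.97
```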
Since there are only two possible labels for the image, we will be using the
binary crossentropy loss. When we fit the model, remember to specify the class weights,
which we defined earlier. Because we are using a TPU, training will be quick - less than
2 minutes.
```python
with strategy.scope():
model = build_model()
METRICS = [
keras.metrics.BinaryAccuracy(),
keras.metrics.Precision(name="precision"),
keras.metrics.Recall(name="recall"),
]
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=lr_schedule),
loss="binary_crossentropy",
metrics=METRICS,
)
history = model.fit(
train_ds,
epochs=100,
validation_data=val_ds,
class_weight=class_weight,
callbacks=[checkpoint_cb, early_stopping_cb],
)
```
<div class="k-default-codeblock">
```
Epoch 1/100
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Iterator.get_next_as_optional()` instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Iterator.get_next_as_optional()` instead.
21/21 [==============================] - 12s 568ms/step - loss: 0.5857 - binary_accuracy: 0.6960 - precision: 0.8887 - recall: 0.6733 - val_loss: 34.0149 - val_binary_accuracy: 0.7180 - val_precision: 0.7180 - val_recall: 1.0000
Epoch 2/100
21/21 [==============================] - 3s 128ms/step - loss: 0.2916 - binary_accuracy: 0.8755 - precision: 0.9540 - recall: 0.8738 - val_loss: 97.5194 - val_binary_accuracy: 0.7180 - val_precision: 0.7180 - val_recall: 1.0000
Epoch 3/100
21/21 [==============================] - 4s 167ms/step - loss: 0.2384 - binary_accuracy: 0.9002 - precision: 0.9663 - recall: 0.8964 - val_loss: 27.7902 - val_binary_accuracy: 0.7180 - val_precision: 0.7180 - val_recall: 1.0000
Epoch 4/100
21/21 [==============================] - 4s 173ms/step - loss: 0.2046 - binary_accuracy: 0.9145 - precision: 0.9725 - recall: 0.9102 - val_loss: 10.8302 - val_binary_accuracy: 0.7180 - val_precision: 0.7180 - val_recall: 1.0000
Epoch 5/100
21/21 [==============================] - 4s 174ms/step - loss: 0.1841 - binary_accuracy: 0.9279 - precision: 0.9733 - recall: 0.9279 - val_loss: 3.5860 - val_binary_accuracy: 0.7103 - val_precision: 0.7162 - val_recall: 0.9879
Epoch 6/100
21/21 [==============================] - 4s 185ms/step - loss: 0.1600 - binary_accuracy: 0.9362 - precision: 0.9791 - recall: 0.9337 - val_loss: 0.3014 - val_binary_accuracy: 0.8895 - val_precision: 0.8973 - val_recall: 0.9555
Epoch 7/100
21/21 [==============================] - 3s 130ms/step - loss: 0.1567 - binary_accuracy: 0.9393 - precision: 0.9798 - recall: 0.9372 - val_loss: 0.6763 - val_binary_accuracy: 0.7810 - val_precision: 0.7760 - val_recall: 0.9771
Epoch 8/100
21/21 [==============================] - 3s 131ms/step - loss: 0.1532 - binary_accuracy: 0.9421 - precision: 0.9825 - recall: 0.9385 - val_loss: 0.3169 - val_binary_accuracy: 0.8895 - val_precision: 0.8684 - val_recall: 0.9973
Epoch 9/100
21/21 [==============================] - 4s 184ms/step - loss: 0.1457 - binary_accuracy: 0.9431 - precision: 0.9822 - recall: 0.9401 - val_loss: 0.2064 - val_binary_accuracy: 0.9273 - val_precision: 0.9840 - val_recall: 0.9136
Epoch 10/100
21/21 [==============================] - 3s 132ms/step - loss: 0.1201 - binary_accuracy: 0.9521 - precision: 0.9869 - recall: 0.9479 - val_loss: 0.4364 - val_binary_accuracy: 0.8605 - val_precision: 0.8443 - val_recall: 0.9879
Epoch 11/100
21/21 [==============================] - 3s 127ms/step - loss: 0.1200 - binary_accuracy: 0.9510 - precision: 0.9863 - recall: 0.9469 - val_loss: 0.5197 - val_binary_accuracy: 0.8508 - val_precision: 1.0000 - val_recall: 0.7922
Epoch 12/100
21/21 [==============================] - 4s 186ms/step - loss: 0.1077 - binary_accuracy: 0.9581 - precision: 0.9870 - recall: 0.9559 - val_loss: 0.1349 - val_binary_accuracy: 0.9486 - val_precision: 0.9587 - val_recall: 0.9703
Epoch 13/100
21/21 [==============================] - 4s 173ms/step - loss: 0.0918 - binary_accuracy: 0.9650 - precision: 0.9914 - recall: 0.9611 - val_loss: 0.0926 - val_binary_accuracy: 0.9700 - val_precision: 0.9837 - val_recall: 0.9744
Epoch 14/100
21/21 [==============================] - 3s 130ms/step - loss: 0.0996 - binary_accuracy: 0.9612 - precision: 0.9913 - recall: 0.9559 - val_loss: 0.1811 - val_binary_accuracy: 0.9419 - val_precision: 0.9956 - val_recall: 0.9231
Epoch 15/100
21/21 [==============================] - 3s 129ms/step - loss: 0.0898 - binary_accuracy: 0.9643 - precision: 0.9901 - recall: 0.9614 - val_loss: 0.1525 - val_binary_accuracy: 0.9486 - val_precision: 0.9986 - val_recall: 0.9298
Epoch 16/100
21/21 [==============================] - 3s 128ms/step - loss: 0.0941 - binary_accuracy: 0.9621 - precision: 0.9904 - recall: 0.9582 - val_loss: 0.5101 - val_binary_accuracy: 0.8527 - val_precision: 1.0000 - val_recall: 0.7949
Epoch 17/100
21/21 [==============================] - 3s 125ms/step - loss: 0.0798 - binary_accuracy: 0.9636 - precision: 0.9897 - recall: 0.9607 - val_loss: 0.1239 - val_binary_accuracy: 0.9622 - val_precision: 0.9875 - val_recall: 0.9595
Epoch 18/100
21/21 [==============================] - 3s 126ms/step - loss: 0.0821 - binary_accuracy: 0.9657 - precision: 0.9911 - recall: 0.9623 - val_loss: 0.1597 - val_binary_accuracy: 0.9322 - val_precision: 0.9956 - val_recall: 0.9096
Epoch 19/100
21/21 [==============================] - 3s 143ms/step - loss: 0.0800 - binary_accuracy: 0.9657 - precision: 0.9917 - recall: 0.9617 - val_loss: 0.2538 - val_binary_accuracy: 0.9109 - val_precision: 1.0000 - val_recall: 0.8758
Epoch 20/100
21/21 [==============================] - 3s 127ms/step - loss: 0.0605 - binary_accuracy: 0.9738 - precision: 0.9950 - recall: 0.9694 - val_loss: 0.6594 - val_binary_accuracy: 0.8566 - val_precision: 1.0000 - val_recall: 0.8003
Epoch 21/100
21/21 [==============================] - 4s 167ms/step - loss: 0.0726 - binary_accuracy: 0.9733 - precision: 0.9937 - recall: 0.9701 - val_loss: 0.0593 - val_binary_accuracy: 0.9816 - val_precision: 0.9945 - val_recall: 0.9798
Epoch 22/100
21/21 [==============================] - 3s 126ms/step - loss: 0.0577 - binary_accuracy: 0.9783 - precision: 0.9951 - recall: 0.9755 - val_loss: 0.1087 - val_binary_accuracy: 0.9729 - val_precision: 0.9931 - val_recall: 0.9690
Epoch 23/100
21/21 [==============================] - 3s 125ms/step - loss: 0.0652 - binary_accuracy: 0.9729 - precision: 0.9924 - recall: 0.9707 - val_loss: 1.8465 - val_binary_accuracy: 0.7180 - val_precision: 0.7180 - val_recall: 1.0000
Epoch 24/100
21/21 [==============================] - 3s 124ms/step - loss: 0.0538 - binary_accuracy: 0.9783 - precision: 0.9951 - recall: 0.9755 - val_loss: 1.5769 - val_binary_accuracy: 0.7180 - val_precision: 0.7180 - val_recall: 1.0000
Epoch 25/100
21/21 [==============================] - 4s 167ms/step - loss: 0.0549 - binary_accuracy: 0.9776 - precision: 0.9954 - recall: 0.9743 - val_loss: 0.0590 - val_binary_accuracy: 0.9777 - val_precision: 0.9904 - val_recall: 0.9784
Epoch 26/100
21/21 [==============================] - 3s 131ms/step - loss: 0.0677 - binary_accuracy: 0.9719 - precision: 0.9924 - recall: 0.9694 - val_loss: 2.6008 - val_binary_accuracy: 0.6928 - val_precision: 0.9977 - val_recall: 0.5735
Epoch 27/100
21/21 [==============================] - 3s 127ms/step - loss: 0.0469 - binary_accuracy: 0.9833 - precision: 0.9971 - recall: 0.9804 - val_loss: 1.0184 - val_binary_accuracy: 0.8605 - val_precision: 0.9983 - val_recall: 0.8070
Epoch 28/100
21/21 [==============================] - 3s 126ms/step - loss: 0.0501 - binary_accuracy: 0.9790 - precision: 0.9961 - recall: 0.9755 - val_loss: 0.3737 - val_binary_accuracy: 0.9089 - val_precision: 0.9954 - val_recall: 0.8772
Epoch 29/100
21/21 [==============================] - 3s 128ms/step - loss: 0.0548 - binary_accuracy: 0.9798 - precision: 0.9941 - recall: 0.9784 - val_loss: 1.2928 - val_binary_accuracy: 0.7907 - val_precision: 1.0000 - val_recall: 0.7085
Epoch 30/100
21/21 [==============================] - 3s 129ms/step - loss: 0.0370 - binary_accuracy: 0.9860 - precision: 0.9980 - recall: 0.9829 - val_loss: 0.1370 - val_binary_accuracy: 0.9612 - val_precision: 0.9972 - val_recall: 0.9487
Epoch 31/100
21/21 [==============================] - 3s 125ms/step - loss: 0.0585 - binary_accuracy: 0.9819 - precision: 0.9951 - recall: 0.9804 - val_loss: 1.1955 - val_binary_accuracy: 0.6870 - val_precision: 0.9976 - val_recall: 0.5655
Epoch 32/100
21/21 [==============================] - 3s 140ms/step - loss: 0.0813 - binary_accuracy: 0.9695 - precision: 0.9934 - recall: 0.9652 - val_loss: 1.0394 - val_binary_accuracy: 0.8576 - val_precision: 0.9853 - val_recall: 0.8138
Epoch 33/100
21/21 [==============================] - 3s 128ms/step - loss: 0.1111 - binary_accuracy: 0.9555 - precision: 0.9870 - recall: 0.9524 - val_loss: 4.9438 - val_binary_accuracy: 0.5911 - val_precision: 1.0000 - val_recall: 0.4305
Epoch 34/100
21/21 [==============================] - 3s 130ms/step - loss: 0.0680 - binary_accuracy: 0.9726 - precision: 0.9921 - recall: 0.9707 - val_loss: 2.8822 - val_binary_accuracy: 0.7267 - val_precision: 0.9978 - val_recall: 0.6208
Epoch 35/100
21/21 [==============================] - 4s 187ms/step - loss: 0.0784 - binary_accuracy: 0.9712 - precision: 0.9892 - recall: 0.9717 - val_loss: 0.3940 - val_binary_accuracy: 0.9390 - val_precision: 0.9942 - val_recall: 0.9204
```
</div>
---
## Visualizing model performance
Let's plot the model accuracy and loss for the training and validation sets. Note that
no random seed is specified for this notebook, so your results may vary slightly.
```python
fig, ax = plt.subplots(1, 4, figsize=(20, 3))
ax = ax.ravel()
for i, met in enumerate(["precision", "recall", "binary_accuracy", "loss"]):
ax[i].plot(history.history[met])
ax[i].plot(history.history["val_" + met])
ax[i].set_title("Model {}".format(met))
ax[i].set_xlabel("epochs")
ax[i].set_ylabel(met)
ax[i].legend(["train", "val"])
```

We see that the accuracy for our model is around 95%.
---
## Predict and evaluate results
Let's evaluate the model on our test data!
```python
model.evaluate(test_ds, return_dict=True)
```
<div class="k-default-codeblock">
```
4/4 [==============================] - 3s 708ms/step - loss: 0.9718 - binary_accuracy: 0.7901 - precision: 0.7524 - recall: 0.9897
{'binary_accuracy': 0.7900640964508057,
'loss': 0.9717951416969299,
'precision': 0.752436637878418,
'recall': 0.9897436499595642}
```
</div>
We see that our accuracy on the test data is lower than the accuracy on our validation
set. This may indicate overfitting.
Our recall is greater than our precision, indicating that almost all pneumonia images are
correctly identified but some normal images are falsely identified. We should aim to
increase our precision.
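One simple lever (a sketch, assuming we keep the trained model as-is) is to raise the
decision threshold above the default 0.5, trading some recall for extra precision:
```python
# Hypothetical threshold; in practice it should be tuned on the validation set.
threshold = 0.7
probs = model.predict(test_ds)  # sigmoid probabilities in [0, 1]
pred_labels = (probs > threshold).astype("int32")
```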
```python
for image, label in test_ds.take(1):
plt.imshow(image[0] / 255.0)
plt.title(CLASS_NAMES[label[0].numpy()])
prediction = model.predict(test_ds.take(1))[0]
scores = [1 - prediction, prediction]
for score, name in zip(scores, CLASS_NAMES):
print("This image is %.2f percent %s" % ((100 * score), name))
```
<div class="k-default-codeblock">
```
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:3: DeprecationWarning: In future, it will be an error for 'np.bool_' scalars to be interpreted as an index
This is separate from the ipykernel package so we can avoid doing imports until
This image is 47.19 percent NORMAL
This image is 52.81 percent PNEUMONIA
```
</div>

| keras-io/examples/vision/md/xray_classification_with_tpus.md/0 | {
"file_path": "keras-io/examples/vision/md/xray_classification_with_tpus.md",
"repo_id": "keras-io",
"token_count": 10185
} | 113 |
"""
Title: Augmenting convnets with aggregated attention
Author: [Aritra Roy Gosthipaty](https://twitter.com/ariG23498)
Date created: 2022/01/22
Last modified: 2022/01/22
Description: Building a patch-convnet architecture and visualizing its attention maps.
Accelerator: GPU
"""
"""
## Introduction
Vision transformers ([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929))
have emerged as a powerful alternative to Convolutional Neural Networks.
ViTs process the images in a patch-based manner. The image information
is then aggregated into a `CLASS` token. This token correlates to the
most important patches of the image for a particular classification decision.
The interaction between the `CLASS` token and the patches can be visualized
to help explain a classification decision. In the academic paper
[Augmenting convolutional networks with attention-based aggregation](https://arxiv.org/abs/2112.13692)
by Touvron et al., the authors propose to set up an equivalent visualization for
convnets. They propose to substitute the global average pooling layer
of a convnet with a Transformer layer. The self-attention layer of the
Transformer would produce attention maps that correspond to the
most attended patches of the image for the classification decision.
In this example, we minimally implement the ideas of
[Augmenting Convolutional networks with attention-based aggregation](https://arxiv.org/abs/2112.13692).
The main goal of this example is to cover the following ideas, with
minor modifications (to adjust the implementation with CIFAR10):
- The simple design for the attention-based pooling layer, such that
it explicitly provides the weights (importance) of the different
patches.
- The novel convnet architecture, called **PatchConvNet**, which
deviates from the age-old pyramidal architecture.
"""
"""
## Setup and Imports
This example uses `keras.optimizers.AdamW`, which is available directly in Keras, so no
additional packages need to be installed.
"""
import math
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras import layers
from keras import ops
from tensorflow import data as tf_data
# Set seed for reproducibility
SEED = 42
keras.utils.set_random_seed(SEED)
"""
## Hyperparameters
"""
# DATA
BATCH_SIZE = 128
BUFFER_SIZE = BATCH_SIZE * 2
AUTO = tf_data.AUTOTUNE
INPUT_SHAPE = (32, 32, 3)
NUM_CLASSES = 10 # for CIFAR 10
# AUGMENTATION
IMAGE_SIZE = 48 # We will resize input images to this size.
# ARCHITECTURE
DIMENSIONS = 256
SE_RATIO = 8
TRUNK_DEPTH = 2
# OPTIMIZER
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-4
# PRETRAINING
EPOCHS = 50
"""
## Load the CIFAR10 dataset
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
(x_train, y_train), (x_val, y_val) = (
(x_train[:40000], y_train[:40000]),
(x_train[40000:], y_train[40000:]),
)
print(f"Training samples: {len(x_train)}")
print(f"Validation samples: {len(x_val)}")
print(f"Testing samples: {len(x_test)}")
train_ds = tf_data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = train_ds.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(AUTO)
val_ds = tf_data.Dataset.from_tensor_slices((x_val, y_val))
val_ds = val_ds.batch(BATCH_SIZE).prefetch(AUTO)
test_ds = tf_data.Dataset.from_tensor_slices((x_test, y_test))
test_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)
"""
## Augmentation layers
"""
def get_preprocessing():
model = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
],
name="preprocessing",
)
return model
def get_train_augmentation_model():
model = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
layers.RandomFlip("horizontal"),
],
name="train_data_augmentation",
)
return model
"""
## Convolutional stem
The stem of the model is a lightweight preprocessing module that
maps image pixels to a set of vectors (patches).
"""
def build_convolutional_stem(dimensions):
"""Build the convolutional stem.
Args:
dimensions: The embedding dimension of the patches (d in paper).
    Returns:
        The convolutional stem as a Keras sequential
        model.
"""
config = {
"kernel_size": (3, 3),
"strides": (2, 2),
"activation": ops.gelu,
"padding": "same",
}
convolutional_stem = keras.Sequential(
[
layers.Conv2D(filters=dimensions // 2, **config),
layers.Conv2D(filters=dimensions, **config),
],
name="convolutional_stem",
)
return convolutional_stem
"""
## Convolutional trunk
The trunk of the model is the most compute-intensive part. It consists
of `N` stacked residual convolutional blocks.
"""
class SqueezeExcite(layers.Layer):
"""Applies squeeze and excitation to input feature maps as seen in
https://arxiv.org/abs/1709.01507.
Args:
ratio: The ratio with which the feature map needs to be reduced in
the reduction phase.
Inputs:
Convolutional features.
Outputs:
Attention modified feature maps.
"""
def __init__(self, ratio, **kwargs):
super().__init__(**kwargs)
self.ratio = ratio
def get_config(self):
config = super().get_config()
config.update({"ratio": self.ratio})
return config
def build(self, input_shape):
filters = input_shape[-1]
self.squeeze = layers.GlobalAveragePooling2D(keepdims=True)
self.reduction = layers.Dense(
units=filters // self.ratio,
activation="relu",
use_bias=False,
)
self.excite = layers.Dense(units=filters, activation="sigmoid", use_bias=False)
self.multiply = layers.Multiply()
def call(self, x):
shortcut = x
x = self.squeeze(x)
x = self.reduction(x)
x = self.excite(x)
x = self.multiply([shortcut, x])
return x
class Trunk(layers.Layer):
"""Convolutional residual trunk as in the https://arxiv.org/abs/2112.13692
Args:
depth: Number of trunk residual blocks
        dimensions: Dimension of the model (denoted by d in the paper)
ratio: The Squeeze-Excitation ratio
Inputs:
Convolutional features extracted from the conv stem.
Outputs:
Flattened patches.
"""
def __init__(self, depth, dimensions, ratio, **kwargs):
super().__init__(**kwargs)
self.ratio = ratio
self.dimensions = dimensions
self.depth = depth
def get_config(self):
config = super().get_config()
config.update(
{
"ratio": self.ratio,
"dimensions": self.dimensions,
"depth": self.depth,
}
)
return config
def build(self, input_shape):
config = {
"filters": self.dimensions,
"activation": ops.gelu,
"padding": "same",
}
trunk_block = [
layers.LayerNormalization(epsilon=1e-6),
layers.Conv2D(kernel_size=(1, 1), **config),
layers.Conv2D(kernel_size=(3, 3), **config),
SqueezeExcite(ratio=self.ratio),
layers.Conv2D(kernel_size=(1, 1), filters=self.dimensions, padding="same"),
]
self.trunk_blocks = [keras.Sequential(trunk_block) for _ in range(self.depth)]
self.add = layers.Add()
self.flatten_spatial = layers.Reshape((-1, self.dimensions))
def call(self, x):
# Remember the input.
shortcut = x
for trunk_block in self.trunk_blocks:
output = trunk_block(x)
shortcut = self.add([output, shortcut])
x = shortcut
# Flatten the patches.
x = self.flatten_spatial(x)
return x
"""
## Attention Pooling
The output of the convolutional trunk is attended with a trainable
_query_ class token. The resulting attention map is the weight of
every patch of the image for a classification decision.
"""
class AttentionPooling(layers.Layer):
    """Applies attention to the patches extracted from the
    trunk with the CLS token.
Args:
dimensions: The dimension of the whole architecture.
num_classes: The number of classes in the dataset.
Inputs:
Flattened patches from the trunk.
Outputs:
        The modified CLS token.
"""
def __init__(self, dimensions, num_classes, **kwargs):
super().__init__(**kwargs)
self.dimensions = dimensions
self.num_classes = num_classes
self.cls = keras.Variable(ops.zeros((1, 1, dimensions)))
def get_config(self):
config = super().get_config()
config.update(
{
"dimensions": self.dimensions,
"num_classes": self.num_classes,
"cls": self.cls.numpy(),
}
)
return config
def build(self, input_shape):
self.attention = layers.MultiHeadAttention(
num_heads=1,
key_dim=self.dimensions,
dropout=0.2,
)
self.layer_norm1 = layers.LayerNormalization(epsilon=1e-6)
self.layer_norm2 = layers.LayerNormalization(epsilon=1e-6)
self.layer_norm3 = layers.LayerNormalization(epsilon=1e-6)
self.mlp = keras.Sequential(
[
layers.Dense(units=self.dimensions, activation=ops.gelu),
layers.Dropout(0.2),
layers.Dense(units=self.dimensions, activation=ops.gelu),
]
)
self.dense = layers.Dense(units=self.num_classes)
self.flatten = layers.Flatten()
def call(self, x):
batch_size = ops.shape(x)[0]
# Expand the class token batch number of times.
class_token = ops.repeat(self.cls, repeats=batch_size, axis=0)
# Concat the input with the trainable class token.
x = ops.concatenate([class_token, x], axis=1)
# Apply attention to x.
x = self.layer_norm1(x)
x, viz_weights = self.attention(
query=x[:, 0:1], key=x, value=x, return_attention_scores=True
)
class_token = class_token + x
class_token = self.layer_norm2(class_token)
class_token = self.flatten(class_token)
class_token = self.layer_norm3(class_token)
class_token = class_token + self.mlp(class_token)
# Build the logits
logits = self.dense(class_token)
return logits, ops.squeeze(viz_weights)[..., 1:]
"""
## Patch convnet
The patch-convnet is shown in the figure below.
|  |
| :--: |
| [Source](https://arxiv.org/abs/2112.13692) |
All the modules in the architecture are built in the earlier sections.
In this section, we stack all of the different modules together.
"""
class PatchConvNet(keras.Model):
def __init__(
self,
stem,
trunk,
attention_pooling,
preprocessing_model,
train_augmentation_model,
**kwargs,
):
super().__init__(**kwargs)
self.stem = stem
self.trunk = trunk
self.attention_pooling = attention_pooling
self.train_augmentation_model = train_augmentation_model
self.preprocessing_model = preprocessing_model
def get_config(self):
config = super().get_config()
config.update(
{
"stem": self.stem,
"trunk": self.trunk,
"attention_pooling": self.attention_pooling,
"train_augmentation_model": self.train_augmentation_model,
"preprocessing_model": self.preprocessing_model,
}
)
return config
def _calculate_loss(self, inputs, test=False):
images, labels = inputs
# Augment the input images.
if test:
augmented_images = self.preprocessing_model(images)
else:
augmented_images = self.train_augmentation_model(images)
# Pass through the stem.
x = self.stem(augmented_images)
# Pass through the trunk.
x = self.trunk(x)
# Pass through the attention pooling block.
logits, _ = self.attention_pooling(x)
# Compute the total loss.
total_loss = self.compiled_loss(labels, logits)
return total_loss, logits
def train_step(self, inputs):
with tf.GradientTape() as tape:
total_loss, logits = self._calculate_loss(inputs)
# Apply gradients.
train_vars = [
self.stem.trainable_variables,
self.trunk.trainable_variables,
self.attention_pooling.trainable_variables,
]
grads = tape.gradient(total_loss, train_vars)
trainable_variable_list = []
for grad, var in zip(grads, train_vars):
for g, v in zip(grad, var):
trainable_variable_list.append((g, v))
self.optimizer.apply_gradients(trainable_variable_list)
# Report progress.
_, labels = inputs
self.compiled_metrics.update_state(labels, logits)
return {m.name: m.result() for m in self.metrics}
def test_step(self, inputs):
total_loss, logits = self._calculate_loss(inputs, test=True)
# Report progress.
_, labels = inputs
self.compiled_metrics.update_state(labels, logits)
return {m.name: m.result() for m in self.metrics}
def call(self, images):
# Augment the input images.
augmented_images = self.preprocessing_model(images)
# Pass through the stem.
x = self.stem(augmented_images)
# Pass through the trunk.
x = self.trunk(x)
# Pass through the attention pooling block.
logits, viz_weights = self.attention_pooling(x)
return logits, viz_weights
"""
## Callbacks
This callback will plot the image and the attention map overlayed on
the image.
"""
# Taking a batch of test inputs to measure model's progress.
test_images, test_labels = next(iter(test_ds))
class TrainMonitor(keras.callbacks.Callback):
def __init__(self, epoch_interval=None):
self.epoch_interval = epoch_interval
def on_epoch_end(self, epoch, logs=None):
if self.epoch_interval and epoch % self.epoch_interval == 4:
test_augmented_images = self.model.preprocessing_model(test_images)
# Pass through the stem.
test_x = self.model.stem(test_augmented_images)
# Pass through the trunk.
test_x = self.model.trunk(test_x)
# Pass through the attention pooling block.
_, test_viz_weights = self.model.attention_pooling(test_x)
            # Reshape the visualization weights
num_patches = ops.shape(test_viz_weights)[-1]
height = width = int(math.sqrt(num_patches))
test_viz_weights = layers.Reshape((height, width))(test_viz_weights)
# Take a random image and its attention weights.
index = np.random.randint(low=0, high=ops.shape(test_augmented_images)[0])
selected_image = test_augmented_images[index]
selected_weight = test_viz_weights[index]
# Plot the images and the overlayed attention map.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
ax[0].imshow(selected_image)
ax[0].set_title(f"Original: {epoch:03d}")
ax[0].axis("off")
img = ax[1].imshow(selected_image)
ax[1].imshow(
selected_weight, cmap="inferno", alpha=0.6, extent=img.get_extent()
)
ax[1].set_title(f"Attended: {epoch:03d}")
ax[1].axis("off")
plt.axis("off")
plt.show()
plt.close()
"""
## Learning rate schedule
"""
class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps
):
super().__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.pi = np.pi
def __call__(self, step):
if self.total_steps < self.warmup_steps:
raise ValueError("Total_steps must be larger or equal to warmup_steps.")
cos_annealed_lr = ops.cos(
self.pi
* (ops.cast(step, "float32") - self.warmup_steps)
/ float(self.total_steps - self.warmup_steps)
)
learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr)
if self.warmup_steps > 0:
if self.learning_rate_base < self.warmup_learning_rate:
raise ValueError(
"Learning_rate_base must be larger or equal to "
"warmup_learning_rate."
)
slope = (
self.learning_rate_base - self.warmup_learning_rate
) / self.warmup_steps
warmup_rate = slope * ops.cast(step, "float32") + self.warmup_learning_rate
learning_rate = ops.where(
step < self.warmup_steps, warmup_rate, learning_rate
)
return ops.where(
step > self.total_steps,
0.0,
learning_rate,
)
total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS)
warmup_epoch_percentage = 0.15
warmup_steps = int(total_steps * warmup_epoch_percentage)
scheduled_lrs = WarmUpCosine(
learning_rate_base=LEARNING_RATE,
total_steps=total_steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
)
"""
## Training
We build the model, compile it, and train it.
"""
train_augmentation_model = get_train_augmentation_model()
preprocessing_model = get_preprocessing()
conv_stem = build_convolutional_stem(dimensions=DIMENSIONS)
conv_trunk = Trunk(depth=TRUNK_DEPTH, dimensions=DIMENSIONS, ratio=SE_RATIO)
attention_pooling = AttentionPooling(dimensions=DIMENSIONS, num_classes=NUM_CLASSES)
patch_conv_net = PatchConvNet(
stem=conv_stem,
trunk=conv_trunk,
attention_pooling=attention_pooling,
train_augmentation_model=train_augmentation_model,
preprocessing_model=preprocessing_model,
)
# Assemble the callbacks.
train_callbacks = [TrainMonitor(epoch_interval=5)]
# Get the optimizer.
optimizer = keras.optimizers.AdamW(
learning_rate=scheduled_lrs, weight_decay=WEIGHT_DECAY
)
# Compile and pretrain the model.
patch_conv_net.compile(
optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="accuracy"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
history = patch_conv_net.fit(
train_ds,
epochs=EPOCHS,
validation_data=val_ds,
callbacks=train_callbacks,
)
# Evaluate the model with the test dataset.
loss, acc_top1, acc_top5 = patch_conv_net.evaluate(test_ds)
print(f"Loss: {loss:0.2f}")
print(f"Top 1 test accuracy: {acc_top1*100:0.2f}%")
print(f"Top 5 test accuracy: {acc_top5*100:0.2f}%")
"""
## Inference
Here, we use the trained model to plot the attention map.
"""
def plot_attention(image):
"""Plots the attention map on top of the image.
Args:
image: A numpy image of arbitrary size.
"""
# Resize the image to a (32, 32) dim.
image = ops.image.resize(image, (32, 32))
image = image[np.newaxis, ...]
test_augmented_images = patch_conv_net.preprocessing_model(image)
# Pass through the stem.
test_x = patch_conv_net.stem(test_augmented_images)
# Pass through the trunk.
test_x = patch_conv_net.trunk(test_x)
# Pass through the attention pooling block.
_, test_viz_weights = patch_conv_net.attention_pooling(test_x)
test_viz_weights = test_viz_weights[np.newaxis, ...]
    # Reshape the visualization weights.
num_patches = ops.shape(test_viz_weights)[-1]
height = width = int(math.sqrt(num_patches))
test_viz_weights = layers.Reshape((height, width))(test_viz_weights)
selected_image = test_augmented_images[0]
selected_weight = test_viz_weights[0]
# Plot the images.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
ax[0].imshow(selected_image)
    ax[0].set_title("Original")
ax[0].axis("off")
img = ax[1].imshow(selected_image)
ax[1].imshow(selected_weight, cmap="inferno", alpha=0.6, extent=img.get_extent())
    ax[1].set_title("Attended")
ax[1].axis("off")
plt.axis("off")
plt.show()
plt.close()
url = "http://farm9.staticflickr.com/8017/7140384795_385b1f48df_z.jpg"
image_name = keras.utils.get_file(fname="image.jpg", origin=url)
image = keras.utils.load_img(image_name)
image = keras.utils.img_to_array(image)
plot_attention(image)
"""
## Conclusions
The attention map corresponding to the trainable `CLASS`
token and the patches of the image helps explain the classification
decision. One should also note that the attention maps gradually get
better. In the initial training regime, the attention is scattered all
around while at a later stage, it focuses more on the objects of the
image.
The non-pyramidal convnet achieves an accuracy of ~84-85% top-1 test
accuracy.
I would like to thank [JarvisLabs.ai](https://jarvislabs.ai/) for
providing GPU credits for this project.
"""
| keras-io/examples/vision/patch_convnet.py/0 | {
"file_path": "keras-io/examples/vision/patch_convnet.py",
"repo_id": "keras-io",
"token_count": 9250
} | 114 |
"""
Title: Image Super-Resolution using an Efficient Sub-Pixel CNN
Author: [Xingyu Long](https://github.com/xingyu-long)
Date created: 2020/07/28
Last modified: 2020/08/27
Description: Implementing Super-Resolution using Efficient sub-pixel model on BSDS500.
Accelerator: GPU
Converted to Keras 3 by: [Md Awsfalur Rahman](https://awsaf49.github.io)
"""
"""
## Introduction
ESPCN (Efficient Sub-Pixel CNN), proposed by [Shi, 2016](https://arxiv.org/abs/1609.05158),
is a model that reconstructs a high-resolution version of an image given a low-resolution
version.
It leverages efficient "sub-pixel convolution" layers, which learn an array of
image upscaling filters.
In this code example, we will implement the model from the paper and train it on a small
dataset,
[BSDS500](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/resources.html).
"""
"""
## Setup
"""
import keras
from keras import layers
from keras import ops
from keras.utils import load_img
from keras.utils import array_to_img
from keras.utils import img_to_array
from keras.preprocessing import image_dataset_from_directory
import tensorflow as tf # only for data preprocessing
import os
import math
import numpy as np
from IPython.display import display
"""
## Load data: BSDS500 dataset
### Download dataset
We use the built-in `keras.utils.get_file` utility to retrieve the dataset.
"""
dataset_url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz"
data_dir = keras.utils.get_file(origin=dataset_url, fname="BSR", untar=True)
root_dir = os.path.join(data_dir, "BSDS500/data")
"""
We create training and validation datasets via `image_dataset_from_directory`.
"""
crop_size = 300
upscale_factor = 3
input_size = crop_size // upscale_factor
batch_size = 8
train_ds = image_dataset_from_directory(
root_dir,
batch_size=batch_size,
image_size=(crop_size, crop_size),
validation_split=0.2,
subset="training",
seed=1337,
label_mode=None,
)
valid_ds = image_dataset_from_directory(
root_dir,
batch_size=batch_size,
image_size=(crop_size, crop_size),
validation_split=0.2,
subset="validation",
seed=1337,
label_mode=None,
)
"""
We rescale the images to take values in the range [0, 1].
"""
def scaling(input_image):
input_image = input_image / 255.0
return input_image
# Scale from (0, 255) to (0, 1)
train_ds = train_ds.map(scaling)
valid_ds = valid_ds.map(scaling)
"""
Let's visualize a few sample images:
"""
for batch in train_ds.take(1):
for img in batch:
display(array_to_img(img))
"""
We prepare a dataset of test image paths that we will use for
visual evaluation at the end of this example.
"""
dataset = os.path.join(root_dir, "images")
test_path = os.path.join(dataset, "test")
test_img_paths = sorted(
[
os.path.join(test_path, fname)
for fname in os.listdir(test_path)
if fname.endswith(".jpg")
]
)
"""
## Crop and resize images
Let's process image data.
First, we convert our images from the RGB color space to the
[YUV colour space](https://en.wikipedia.org/wiki/YUV).
For the input data (low-resolution images),
we crop the image, retrieve the `y` channel (luminance),
and resize it with the `area` method (use `BICUBIC` if you use PIL).
We only consider the luminance channel
in the YUV color space because humans are more sensitive to
luminance change.
For the target data (high-resolution images), we just crop the image
and retrieve the `y` channel.
"""
# Use TF Ops to process.
def process_input(input, input_size, upscale_factor):
input = tf.image.rgb_to_yuv(input)
last_dimension_axis = len(input.shape) - 1
y, u, v = tf.split(input, 3, axis=last_dimension_axis)
return tf.image.resize(y, [input_size, input_size], method="area")
def process_target(input):
input = tf.image.rgb_to_yuv(input)
last_dimension_axis = len(input.shape) - 1
y, u, v = tf.split(input, 3, axis=last_dimension_axis)
return y
train_ds = train_ds.map(
lambda x: (process_input(x, input_size, upscale_factor), process_target(x))
)
train_ds = train_ds.prefetch(buffer_size=32)
valid_ds = valid_ds.map(
lambda x: (process_input(x, input_size, upscale_factor), process_target(x))
)
valid_ds = valid_ds.prefetch(buffer_size=32)
"""
Let's take a look at the input and target data.
"""
for batch in train_ds.take(1):
for img in batch[0]:
display(array_to_img(img))
for img in batch[1]:
display(array_to_img(img))
"""
## Build a model
Compared to the paper, we add one more layer and we use the `relu` activation function
instead of `tanh`.
It achieves better performance even though we train the model for fewer epochs.
"""
class DepthToSpace(layers.Layer):
def __init__(self, block_size):
super().__init__()
self.block_size = block_size
def call(self, input):
batch, height, width, depth = ops.shape(input)
depth = depth // (self.block_size**2)
x = ops.reshape(
input, [batch, height, width, self.block_size, self.block_size, depth]
)
x = ops.transpose(x, [0, 1, 3, 2, 4, 5])
x = ops.reshape(
x, [batch, height * self.block_size, width * self.block_size, depth]
)
return x
def get_model(upscale_factor=3, channels=1):
conv_args = {
"activation": "relu",
"kernel_initializer": "orthogonal",
"padding": "same",
}
inputs = keras.Input(shape=(None, None, channels))
x = layers.Conv2D(64, 5, **conv_args)(inputs)
x = layers.Conv2D(64, 3, **conv_args)(x)
x = layers.Conv2D(32, 3, **conv_args)(x)
x = layers.Conv2D(channels * (upscale_factor**2), 3, **conv_args)(x)
outputs = DepthToSpace(upscale_factor)(x)
return keras.Model(inputs, outputs)
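"""
As a quick sanity check (an illustrative aside, not part of the original example),
we can run a random low-resolution batch through a freshly built model and confirm
that the spatial dimensions grow by `upscale_factor`:
"""
sanity_model = get_model(upscale_factor=3, channels=1)
dummy_lowres = np.random.random((1, 100, 100, 1)).astype("float32")
# A (1, 100, 100, 1) input should come out as (1, 300, 300, 1).
print(sanity_model(dummy_lowres).shape)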
"""
## Define utility functions
We need to define several utility functions to monitor our results:
- `plot_results` to plot and save an image.
- `get_lowres_image` to convert an image to its low-resolution version.
- `upscale_image` to turn a low-resolution image into
a high-resolution version reconstructed by the model.
In this function, we use the `y` channel from the YUV color space
as input to the model and then combine the output with the
other channels to obtain an RGB image.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import PIL
def plot_results(img, prefix, title):
"""Plot the result with zoom-in area."""
img_array = img_to_array(img)
img_array = img_array.astype("float32") / 255.0
# Create a new figure with a default 111 subplot.
fig, ax = plt.subplots()
im = ax.imshow(img_array[::-1], origin="lower")
plt.title(title)
# zoom-factor: 2.0, location: upper-left
axins = zoomed_inset_axes(ax, 2, loc=2)
axins.imshow(img_array[::-1], origin="lower")
# Specify the limits.
x1, x2, y1, y2 = 200, 300, 100, 200
# Apply the x-limits.
axins.set_xlim(x1, x2)
# Apply the y-limits.
axins.set_ylim(y1, y2)
plt.yticks(visible=False)
plt.xticks(visible=False)
# Make the line.
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="blue")
plt.savefig(str(prefix) + "-" + title + ".png")
plt.show()
def get_lowres_image(img, upscale_factor):
"""Return low-resolution image to use as model input."""
return img.resize(
(img.size[0] // upscale_factor, img.size[1] // upscale_factor),
PIL.Image.BICUBIC,
)
def upscale_image(model, img):
"""Predict the result based on input image and restore the image as RGB."""
ycbcr = img.convert("YCbCr")
y, cb, cr = ycbcr.split()
y = img_to_array(y)
y = y.astype("float32") / 255.0
input = np.expand_dims(y, axis=0)
out = model.predict(input)
out_img_y = out[0]
out_img_y *= 255.0
# Restore the image in RGB color space.
out_img_y = out_img_y.clip(0, 255)
out_img_y = out_img_y.reshape((np.shape(out_img_y)[0], np.shape(out_img_y)[1]))
out_img_y = PIL.Image.fromarray(np.uint8(out_img_y), mode="L")
out_img_cb = cb.resize(out_img_y.size, PIL.Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, PIL.Image.BICUBIC)
out_img = PIL.Image.merge("YCbCr", (out_img_y, out_img_cb, out_img_cr)).convert(
"RGB"
)
return out_img
"""
## Define callbacks to monitor training
The `ESPCNCallback` object will compute and display
the [PSNR](https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio) metric.
This is the main metric we use to evaluate super-resolution performance.
"""
class ESPCNCallback(keras.callbacks.Callback):
def __init__(self):
super().__init__()
self.test_img = get_lowres_image(load_img(test_img_paths[0]), upscale_factor)
# Store PSNR value in each epoch.
def on_epoch_begin(self, epoch, logs=None):
self.psnr = []
def on_epoch_end(self, epoch, logs=None):
print("Mean PSNR for epoch: %.2f" % (np.mean(self.psnr)))
if epoch % 20 == 0:
prediction = upscale_image(self.model, self.test_img)
plot_results(prediction, "epoch-" + str(epoch), "prediction")
def on_test_batch_end(self, batch, logs=None):
self.psnr.append(10 * math.log10(1 / logs["loss"]))
"""
Define `ModelCheckpoint` and `EarlyStopping` callbacks.
"""
early_stopping_callback = keras.callbacks.EarlyStopping(monitor="loss", patience=10)
checkpoint_filepath = "/tmp/checkpoint.keras"
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=False,
monitor="loss",
mode="min",
save_best_only=True,
)
model = get_model(upscale_factor=upscale_factor, channels=1)
model.summary()
callbacks = [ESPCNCallback(), early_stopping_callback, model_checkpoint_callback]
loss_fn = keras.losses.MeanSquaredError()
optimizer = keras.optimizers.Adam(learning_rate=0.001)
"""
## Train the model
"""
epochs = 100
model.compile(
optimizer=optimizer,
loss=loss_fn,
)
model.fit(
train_ds, epochs=epochs, callbacks=callbacks, validation_data=valid_ds, verbose=2
)
# The model weights (that are considered the best) are loaded into the model.
model.load_weights(checkpoint_filepath)
"""
## Run model prediction and plot the results
Let's compute the reconstructed version of a few images and save the results.
"""
total_bicubic_psnr = 0.0
total_test_psnr = 0.0
for index, test_img_path in enumerate(test_img_paths[50:60]):
img = load_img(test_img_path)
lowres_input = get_lowres_image(img, upscale_factor)
w = lowres_input.size[0] * upscale_factor
h = lowres_input.size[1] * upscale_factor
highres_img = img.resize((w, h))
prediction = upscale_image(model, lowres_input)
lowres_img = lowres_input.resize((w, h))
lowres_img_arr = img_to_array(lowres_img)
highres_img_arr = img_to_array(highres_img)
predict_img_arr = img_to_array(prediction)
bicubic_psnr = tf.image.psnr(lowres_img_arr, highres_img_arr, max_val=255)
test_psnr = tf.image.psnr(predict_img_arr, highres_img_arr, max_val=255)
total_bicubic_psnr += bicubic_psnr
total_test_psnr += test_psnr
print(
"PSNR of low resolution image and high resolution image is %.4f" % bicubic_psnr
)
print("PSNR of predict and high resolution is %.4f" % test_psnr)
plot_results(lowres_img, index, "lowres")
plot_results(highres_img, index, "highres")
plot_results(prediction, index, "prediction")
print("Avg. PSNR of lowres images is %.4f" % (total_bicubic_psnr / 10))
print("Avg. PSNR of reconstructions is %.4f" % (total_test_psnr / 10))
| keras-io/examples/vision/super_resolution_sub_pixel.py/0 | {
"file_path": "keras-io/examples/vision/super_resolution_sub_pixel.py",
"repo_id": "keras-io",
"token_count": 4627
} | 115 |
"""
Title: Working with RNNs
Authors: Scott Zhu, Francois Chollet
Date created: 2019/07/08
Last modified: 2023/07/10
Description: Complete guide to using & customizing RNN layers.
Accelerator: GPU
"""
"""
## Introduction
Recurrent neural networks (RNN) are a class of neural networks that is powerful for
modeling sequence data such as time series or natural language.
Schematically, a RNN layer uses a `for` loop to iterate over the timesteps of a
sequence, while maintaining an internal state that encodes information about the
timesteps it has seen so far.
The Keras RNN API is designed with a focus on:
- **Ease of use**: the built-in `keras.layers.RNN`, `keras.layers.LSTM`,
`keras.layers.GRU` layers enable you to quickly build recurrent models without
having to make difficult configuration choices.
- **Ease of customization**: You can also define your own RNN cell layer (the inner
part of the `for` loop) with custom behavior, and use it with the generic
`keras.layers.RNN` layer (the `for` loop itself). This allows you to quickly
prototype different research ideas in a flexible way with minimal code.
"""
"""
## Setup
"""
import numpy as np
import tensorflow as tf
import keras
from keras import layers
"""
## Built-in RNN layers: a simple example
"""
"""
There are three built-in RNN layers in Keras:
1. `keras.layers.SimpleRNN`, a fully-connected RNN where the output from previous
timestep is to be fed to next timestep.
2. `keras.layers.GRU`, first proposed in
[Cho et al., 2014](https://arxiv.org/abs/1406.1078).
3. `keras.layers.LSTM`, first proposed in
[Hochreiter & Schmidhuber, 1997](https://www.bioinf.jku.at/publications/older/2604.pdf).
In early 2015, Keras had the first reusable open-source Python implementations of LSTM
and GRU.
Here is a simple example of a `Sequential` model that processes sequences of integers,
embeds each integer into a 64-dimensional vector, then processes the sequence of
vectors using a `LSTM` layer.
"""
model = keras.Sequential()
# Add an Embedding layer expecting input vocab of size 1000, and
# output embedding dimension of size 64.
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# Add a LSTM layer with 128 internal units.
model.add(layers.LSTM(128))
# Add a Dense layer with 10 units.
model.add(layers.Dense(10))
model.summary()
"""
Built-in RNNs support a number of useful features:
- Recurrent dropout, via the `dropout` and `recurrent_dropout` arguments
- Ability to process an input sequence in reverse, via the `go_backwards` argument
- Loop unrolling (which can lead to a large speedup when processing short sequences on
CPU), via the `unroll` argument
- ...and more.
For more information, see the
[RNN API documentation](https://keras.io/api/layers/recurrent_layers/).
"""
"""
## Outputs and states
By default, the output of a RNN layer contains a single vector per sample. This vector
is the RNN cell output corresponding to the last timestep, containing information
about the entire input sequence. The shape of this output is `(batch_size, units)`
where `units` corresponds to the `units` argument passed to the layer's constructor.
A RNN layer can also return the entire sequence of outputs for each sample (one vector
per timestep per sample), if you set `return_sequences=True`. The shape of this output
is `(batch_size, timesteps, units)`.
"""
model = keras.Sequential()
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# The output of GRU will be a 3D tensor of shape (batch_size, timesteps, 256)
model.add(layers.GRU(256, return_sequences=True))
# The output of SimpleRNN will be a 2D tensor of shape (batch_size, 128)
model.add(layers.SimpleRNN(128))
model.add(layers.Dense(10))
model.summary()
"""
In addition, a RNN layer can return its final internal state(s). The returned states
can be used to resume the RNN execution later, or
[to initialize another RNN](https://arxiv.org/abs/1409.3215).
This setting is commonly used in the
encoder-decoder sequence-to-sequence model, where the encoder final state is used as
the initial state of the decoder.
To configure a RNN layer to return its internal state, set the `return_state` parameter
to `True` when creating the layer. Note that `LSTM` has 2 state tensors, but `GRU`
only has one.
To configure the initial state of the layer, just call the layer with additional
keyword argument `initial_state`.
Note that the shape of the state needs to match the unit size of the layer, like in the
example below.
"""
encoder_vocab = 1000
decoder_vocab = 2000
encoder_input = layers.Input(shape=(None,))
encoder_embedded = layers.Embedding(input_dim=encoder_vocab, output_dim=64)(
encoder_input
)
# Return states in addition to output
output, state_h, state_c = layers.LSTM(64, return_state=True, name="encoder")(
encoder_embedded
)
encoder_state = [state_h, state_c]
decoder_input = layers.Input(shape=(None,))
decoder_embedded = layers.Embedding(input_dim=decoder_vocab, output_dim=64)(
decoder_input
)
# Pass the 2 states to a new LSTM layer, as initial state
decoder_output = layers.LSTM(64, name="decoder")(
decoder_embedded, initial_state=encoder_state
)
output = layers.Dense(10)(decoder_output)
model = keras.Model([encoder_input, decoder_input], output)
model.summary()
"""
## RNN layers and RNN cells
In addition to the built-in RNN layers, the RNN API also provides cell-level APIs.
Unlike RNN layers, which process whole batches of input sequences, the RNN cell only
processes a single timestep.
The cell is the inside of the `for` loop of a RNN layer. Wrapping a cell inside a
`keras.layers.RNN` layer gives you a layer capable of processing batches of
sequences, e.g. `RNN(LSTMCell(10))`.
Mathematically, `RNN(LSTMCell(10))` produces the same result as `LSTM(10)`. In fact,
the implementation of this layer in TF v1.x was just creating the corresponding RNN
cell and wrapping it in an RNN layer. However, using the built-in `GRU` and `LSTM`
layers enables the use of CuDNN, and you may see better performance.
There are three built-in RNN cells, each of them corresponding to the matching RNN
layer.
- `keras.layers.SimpleRNNCell` corresponds to the `SimpleRNN` layer.
- `keras.layers.GRUCell` corresponds to the `GRU` layer.
- `keras.layers.LSTMCell` corresponds to the `LSTM` layer.
The cell abstraction, together with the generic `keras.layers.RNN` class, make it
very easy to implement custom RNN architectures for your research.
"""
"""
## Cross-batch statefulness
When processing very long sequences (possibly infinite), you may want to use the
pattern of **cross-batch statefulness**.
Normally, the internal state of a RNN layer is reset every time it sees a new batch
(i.e. every sample seen by the layer is assumed to be independent of the past). The
layer will only maintain a state while processing a given sample.
If you have very long sequences though, it is useful to break them into shorter
sequences, and to feed these shorter sequences sequentially into a RNN layer without
resetting the layer's state. That way, the layer can retain information about the
entirety of the sequence, even though it's only seeing one sub-sequence at a time.
You can do this by setting `stateful=True` in the constructor.
If you have a sequence `s = [t0, t1, ... t1546, t1547]`, you would split it into e.g.
```
s1 = [t0, t1, ... t100]
s2 = [t101, ... t201]
...
s16 = [t1501, ... t1547]
```
Then you would process it via:
```python
lstm_layer = layers.LSTM(64, stateful=True)
for s in sub_sequences:
output = lstm_layer(s)
```
When you want to clear the state, you can use `layer.reset_states()`.
> Note: In this setup, sample `i` in a given batch is assumed to be the continuation of
sample `i` in the previous batch. This means that all batches should contain the same
number of samples (batch size). E.g. if a batch contains `[sequence_A_from_t0_to_t100,
sequence_B_from_t0_to_t100]`, the next batch should contain
`[sequence_A_from_t101_to_t200, sequence_B_from_t101_to_t200]`.
Here is a complete example:
"""
paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)
lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
output = lstm_layer(paragraph3)
# reset_states() will reset the cached state to the original initial_state.
# If no initial_state was provided, zero-states will be used by default.
lstm_layer.reset_states()
"""
### RNN State Reuse
<a id="rnn_state_reuse"></a>
"""
"""
The recorded states of the RNN layer are not included in `layer.weights`. If you
would like to reuse the state from an RNN layer, you can retrieve the states value via
`layer.states` and use it as the
initial state for a new layer via the Keras functional API, like `new_layer(inputs,
initial_state=layer.states)`, or via model subclassing.
Please also note that a Sequential model cannot be used in this case, since it only
supports layers with a single input and output; the extra initial-state input makes
it impossible to use here.
"""
paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)
lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
existing_state = lstm_layer.states
new_lstm_layer = layers.LSTM(64)
new_output = new_lstm_layer(paragraph3, initial_state=existing_state)
"""
## Bidirectional RNNs
For sequences other than time series (e.g. text), it is often the case that an RNN model
can perform better if it not only processes the sequence from start to end, but also
backwards. For example, to predict the next word in a sentence, it is often useful to
have the context around the word, not just the words that come before it.
Keras provides an easy API for you to build such bidirectional RNNs: the
`keras.layers.Bidirectional` wrapper.
"""
model = keras.Sequential()
model.add(
layers.Bidirectional(layers.LSTM(64, return_sequences=True), input_shape=(5, 10))
)
model.add(layers.Bidirectional(layers.LSTM(32)))
model.add(layers.Dense(10))
model.summary()
"""
Under the hood, `Bidirectional` will copy the RNN layer passed in, and flip the
`go_backwards` field of the newly copied layer, so that it will process the inputs in
reverse order.
The output of the `Bidirectional` RNN will be, by default, the concatenation of the forward layer
output and the backward layer output. If you need a different merging behavior, e.g.
summation, change the `merge_mode` parameter in the `Bidirectional` wrapper
constructor. For more details about `Bidirectional`, please check
[the API docs](https://keras.io/api/layers/recurrent_layers/bidirectional/).
"""
"""
## Performance optimization and CuDNN kernels
In TensorFlow 2.0, the built-in LSTM and GRU layers have been updated to leverage CuDNN
kernels by default when a GPU is available. With this change, the prior
`keras.layers.CuDNNLSTM/CuDNNGRU` layers have been deprecated, and you can build your
model without worrying about the hardware it will run on.
Since the CuDNN kernel is built with certain assumptions, this means the layer **will
not be able to use the CuDNN kernel if you change the defaults of the built-in LSTM or
GRU layers**. E.g.:
- Changing the `activation` function from `tanh` to something else.
- Changing the `recurrent_activation` function from `sigmoid` to something else.
- Using `recurrent_dropout` > 0.
- Setting `unroll` to True, which forces LSTM/GRU to decompose the inner
`tf.while_loop` into an unrolled `for` loop.
- Setting `use_bias` to False.
- Using masking when the input data is not strictly right padded (if the mask
corresponds to strictly right padded data, CuDNN can still be used. This is the most
common case).
For the detailed list of constraints, please see the documentation for the
[LSTM](https://keras.io/api/layers/recurrent_layers/lstm/) and
[GRU](https://keras.io/api/layers/recurrent_layers/gru/) layers.
"""
"""
### Using CuDNN kernels when available
Let's build a simple LSTM model to demonstrate the performance difference.
We'll use as input sequences the sequence of rows of MNIST digits (treating each row of
pixels as a timestep), and we'll predict the digit's label.
"""
batch_size = 64
# Each MNIST image batch is a tensor of shape (batch_size, 28, 28).
# Each input sequence will be of size (28, 28) (height is treated like time).
input_dim = 28
units = 64
output_size = 10 # labels are from 0 to 9
# Build the RNN model
def build_model(allow_cudnn_kernel=True):
# CuDNN is only available at the layer level, and not at the cell level.
# This means `LSTM(units)` will use the CuDNN kernel,
# while RNN(LSTMCell(units)) will run on non-CuDNN kernel.
if allow_cudnn_kernel:
# The LSTM layer with default options uses CuDNN.
lstm_layer = keras.layers.LSTM(units, input_shape=(None, input_dim))
else:
# Wrapping a LSTMCell in a RNN layer will not use CuDNN.
lstm_layer = keras.layers.RNN(
keras.layers.LSTMCell(units), input_shape=(None, input_dim)
)
model = keras.models.Sequential(
[
lstm_layer,
keras.layers.BatchNormalization(),
keras.layers.Dense(output_size),
]
)
return model
"""
Let's load the MNIST dataset:
"""
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
sample, sample_label = x_train[0], y_train[0]
"""
Let's create a model instance and train it.
We choose `sparse_categorical_crossentropy` as the loss function for the model. The
output of the model has a shape of `[batch_size, 10]`. The target for the model is an
integer vector, in which each integer is in the range 0 to 9.
"""
model = build_model(allow_cudnn_kernel=True)
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer="sgd",
metrics=["accuracy"],
)
model.fit(
x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1
)
"""
Now, let's compare to a model that does not use the CuDNN kernel:
"""
noncudnn_model = build_model(allow_cudnn_kernel=False)
noncudnn_model.set_weights(model.get_weights())
noncudnn_model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer="sgd",
metrics=["accuracy"],
)
noncudnn_model.fit(
x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1
)
"""
When running on a machine with a NVIDIA GPU and CuDNN installed,
the model built with CuDNN is much faster to train compared to the
model that uses the regular TensorFlow kernel.
The same CuDNN-enabled model can also be used to run inference in a CPU-only
environment. The `tf.device` annotation below is just forcing the device placement.
The model will run on CPU by default if no GPU is available.
You simply don't have to worry about the hardware you're running on anymore. Isn't that
pretty cool?
"""
import matplotlib.pyplot as plt
with tf.device("CPU:0"):
cpu_model = build_model(allow_cudnn_kernel=True)
cpu_model.set_weights(model.get_weights())
result = tf.argmax(cpu_model.predict_on_batch(tf.expand_dims(sample, 0)), axis=1)
print(
"Predicted result is: %s, target result is: %s" % (result.numpy(), sample_label)
)
plt.imshow(sample, cmap=plt.get_cmap("gray"))
"""
## RNNs with list/dict inputs, or nested inputs
Nested structures allow implementers to include more information within a single
timestep. For example, a video frame could have audio and video input at the same
time. The data shape in this case could be:
`[batch, timestep, {"video": [height, width, channel], "audio": [frequency]}]`
In another example, handwriting data could have both coordinates x and y for the
current position of the pen, as well as pressure information. So the data
representation could be:
`[batch, timestep, {"location": [x, y], "pressure": [force]}]`
The following code provides an example of how to build a custom RNN cell that accepts
such structured inputs.
"""
"""
### Define a custom cell that supports nested input/output
"""
"""
See [Making new Layers & Models via subclassing](/guides/making_new_layers_and_models_via_subclassing/)
for details on writing your own layers.
"""
@keras.saving.register_keras_serializable()
class NestedCell(keras.layers.Layer):
def __init__(self, unit_1, unit_2, unit_3, **kwargs):
self.unit_1 = unit_1
self.unit_2 = unit_2
self.unit_3 = unit_3
self.state_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
self.output_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
super().__init__(**kwargs)
def build(self, input_shapes):
# expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]
i1 = input_shapes[0][1]
i2 = input_shapes[1][1]
i3 = input_shapes[1][2]
self.kernel_1 = self.add_weight(
shape=(i1, self.unit_1), initializer="uniform", name="kernel_1"
)
self.kernel_2_3 = self.add_weight(
shape=(i2, i3, self.unit_2, self.unit_3),
initializer="uniform",
name="kernel_2_3",
)
def call(self, inputs, states):
# inputs should be in [(batch, input_1), (batch, input_2, input_3)]
# state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]
input_1, input_2 = tf.nest.flatten(inputs)
s1, s2 = states
output_1 = tf.matmul(input_1, self.kernel_1)
output_2_3 = tf.einsum("bij,ijkl->bkl", input_2, self.kernel_2_3)
state_1 = s1 + output_1
state_2_3 = s2 + output_2_3
output = (output_1, output_2_3)
new_states = (state_1, state_2_3)
return output, new_states
def get_config(self):
return {"unit_1": self.unit_1, "unit_2": self.unit_2, "unit_3": self.unit_3}
"""
### Build a RNN model with nested input/output
Let's build a Keras model that uses a `keras.layers.RNN` layer and the custom cell
we just defined.
"""
unit_1 = 10
unit_2 = 20
unit_3 = 30
i1 = 32
i2 = 64
i3 = 32
batch_size = 64
num_batches = 10
timestep = 50
cell = NestedCell(unit_1, unit_2, unit_3)
rnn = keras.layers.RNN(cell)
input_1 = keras.Input((None, i1))
input_2 = keras.Input((None, i2, i3))
outputs = rnn((input_1, input_2))
model = keras.models.Model([input_1, input_2], outputs)
model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])
"""
### Train the model with randomly generated data
Since there isn't a good candidate dataset for this model, we use random Numpy data for
demonstration.
"""
input_1_data = np.random.random((batch_size * num_batches, timestep, i1))
input_2_data = np.random.random((batch_size * num_batches, timestep, i2, i3))
target_1_data = np.random.random((batch_size * num_batches, unit_1))
target_2_data = np.random.random((batch_size * num_batches, unit_2, unit_3))
input_data = [input_1_data, input_2_data]
target_data = [target_1_data, target_2_data]
model.fit(input_data, target_data, batch_size=batch_size)
"""
With the Keras `keras.layers.RNN` layer, you are only expected to define the math
logic for an individual step within the sequence, and the `keras.layers.RNN` layer
will handle the sequence iteration for you. It's an incredibly powerful way to quickly
prototype new kinds of RNNs (e.g. an LSTM variant).
For more details, please visit the [API docs](https://keras.io/api/layers/recurrent_layers/rnn/).
"""
| keras-io/guides/_working_with_rnns.py/0 | {
"file_path": "keras-io/guides/_working_with_rnns.py",
"repo_id": "keras-io",
"token_count": 6720
} | 116 |
<jupyter_start><jupyter_text>Getting started with Keras 3**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2023/07/10**Last modified:** 2023/07/10**Description:** First contact with the new multi-backend Keras. IntroductionKeras 3 is a full implementation of the Keras API thatworks with TensorFlow, JAX, and PyTorch interchangeably.This notebook will walk you through key Keras 3 workflows.First, let's install Keras 3:<jupyter_code>!pip install -q keras-core<jupyter_output><empty_output><jupyter_text>SetupWe're going to be using the JAX backend here -- but you canedit the string below to `"tensorflow"` or `"torch"` and hit"Restart runtime", and the whole notebook will run just the same!This entire guide is backend-agnostic.<jupyter_code>import numpy as np
import os
os.environ["KERAS_BACKEND"] = "jax"
# Note that keras should only be imported after the backend
# has been configured. The backend cannot be changed once the
# package is imported.
import keras<jupyter_output><empty_output><jupyter_text>A first example: A MNIST convnetLet's start with the Hello World of ML: training a convnetto classify MNIST digits.Here's the data:<jupyter_code># Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print("y_train shape:", y_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")<jupyter_output><empty_output><jupyter_text>Here's our model.Different model-building options that Keras offers include:- [The Sequential API](https://keras.io/guides/sequential_model/) (what we use below)- [The Functional API](https://keras.io/guides/functional_api/) (most typical)- [Writing your own models yourself via subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) (for advanced use cases)<jupyter_code># Model parameters
num_classes = 10
input_shape = (28, 28, 1)
model = keras.Sequential(
[
keras.layers.Input(shape=input_shape),
keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"),
keras.layers.GlobalAveragePooling2D(),
keras.layers.Dropout(0.5),
keras.layers.Dense(num_classes, activation="softmax"),
]
)<jupyter_output><empty_output><jupyter_text>Here's our model summary:<jupyter_code>model.summary()<jupyter_output><empty_output><jupyter_text>We use the `compile()` method to specify the optimizer, loss function,and the metrics to monitor. Note that with the JAX and TensorFlow backends,XLA compilation is turned on by default.<jupyter_code>model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
],
)<jupyter_output><empty_output><jupyter_text>Let's train and evaluate the model. We'll set aside a validation split of 15%of the data during training to monitor generalization on unseen data.<jupyter_code>batch_size = 128
epochs = 20
callbacks = [
keras.callbacks.ModelCheckpoint(filepath="model_at_epoch_{epoch}.keras"),
keras.callbacks.EarlyStopping(monitor="val_loss", patience=2),
]
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_split=0.15,
callbacks=callbacks,
)
score = model.evaluate(x_test, y_test, verbose=0)<jupyter_output><empty_output><jupyter_text>During training, we were saving a model at the end of each epoch. Youcan also save the model in its latest state like this:<jupyter_code>model.save("final_model.keras")<jupyter_output><empty_output><jupyter_text>And reload it like this:<jupyter_code>model = keras.saving.load_model("final_model.keras")<jupyter_output><empty_output><jupyter_text>Next, you can query predictions of class probabilities with `predict()`:<jupyter_code>predictions = model.predict(x_test)<jupyter_output><empty_output><jupyter_text>That's it for the basics! Writing cross-framework custom componentsKeras 3 enables you to write custom Layers, Models, Metrics, Losses, and Optimizersthat work across TensorFlow, JAX, and PyTorch with the same codebase. Let's take a lookat custom layers first.If you're already familiar with writing custom layers in `tf.keras` -- well, nothinghas changed. Except one thing: instead of using functions from the `tf` namespace, you should use functionsfrom `keras.ops.*`.The `keras.ops` namespace contains:- An implementation of the NumPy API, e.g. `keras.ops.stack` or `keras.ops.matmul`.- A set of neural network specific ops that are absent from NumPy, such as `keras.ops.conv`or `keras.ops.binary_crossentropy`.Let's make a custom `Dense` layer that works with all backends:<jupyter_code>class MyDense(keras.layers.Layer):
def __init__(self, units, activation=None, name=None):
super().__init__(name=name)
self.units = units
self.activation = keras.activations.get(activation)
def build(self, input_shape):
input_dim = input_shape[-1]
self.w = self.add_weight(
shape=(input_dim, self.units),
initializer=keras.initializers.GlorotNormal(),
name="kernel",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,),
initializer=keras.initializers.Zeros(),
name="bias",
trainable=True,
)
def call(self, inputs):
# Use Keras ops to create backend-agnostic layers/metrics/etc.
x = keras.ops.matmul(inputs, self.w) + self.b
return self.activation(x)<jupyter_output><empty_output><jupyter_text>Next, let's make a custom `Dropout` layer that relies on the `keras.random`namespace:<jupyter_code>class MyDropout(keras.layers.Layer):
def __init__(self, rate, name=None):
super().__init__(name=name)
self.rate = rate
# Use seed_generator for managing RNG state.
# It is a state element and its seed variable is
# tracked as part of `layer.variables`.
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, inputs):
# Use `keras.random` for random ops.
return keras.random.dropout(inputs, self.rate, seed=self.seed_generator)<jupyter_output><empty_output><jupyter_text>Next, let's write a custom subclassed model that uses our two custom layers:<jupyter_code>class MyModel(keras.Model):
def __init__(self, num_classes):
super().__init__()
self.conv_base = keras.Sequential(
[
keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"),
keras.layers.GlobalAveragePooling2D(),
]
)
self.dp = MyDropout(0.5)
self.dense = MyDense(num_classes, activation="softmax")
def call(self, x):
x = self.conv_base(x)
x = self.dp(x)
return self.dense(x)<jupyter_output><empty_output><jupyter_text>Let's compile it and fit it:<jupyter_code>model = MyModel(num_classes=10)
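# Illustrative aside (not part of the original notebook): the custom layers
# defined above also work standalone, like any built-in Keras layer.
demo_dense = MyDense(4, activation="relu")
print(demo_dense(np.ones((2, 3))).shape)  # Expected: (2, 4)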
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
],
)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=1, # For speed
validation_split=0.15,
)<jupyter_output><empty_output><jupyter_text>Training models on arbitrary data sourcesAll Keras models can be trained and evaluated on a wide variety of data sources,independently of the backend you're using. This includes:- NumPy arrays- Pandas dataframes- TensorFlow`tf.data.Dataset` objects- PyTorch `DataLoader` objects- Keras `PyDataset` objectsThey all work whether you're using TensorFlow, JAX, or PyTorch as your Keras backend.Let's try it out with PyTorch `DataLoaders`:<jupyter_code>import torch
# Create a TensorDataset
train_torch_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
val_torch_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_test), torch.from_numpy(y_test)
)
# Create a DataLoader
train_dataloader = torch.utils.data.DataLoader(
train_torch_dataset, batch_size=batch_size, shuffle=True
)
val_dataloader = torch.utils.data.DataLoader(
val_torch_dataset, batch_size=batch_size, shuffle=False
)
model = MyModel(num_classes=10)
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
],
)
model.fit(train_dataloader, epochs=1, validation_data=val_dataloader)<jupyter_output><empty_output><jupyter_text>Now let's try this out with `tf.data`:<jupyter_code>import tensorflow as tf
train_dataset = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.batch(batch_size)
.prefetch(tf.data.AUTOTUNE)
)
test_dataset = (
tf.data.Dataset.from_tensor_slices((x_test, y_test))
.batch(batch_size)
.prefetch(tf.data.AUTOTUNE)
)
model = MyModel(num_classes=10)
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
],
)
model.fit(train_dataset, epochs=1, validation_data=test_dataset)<jupyter_output><empty_output> | keras-io/guides/ipynb/keras_core/getting_started_with_keras_core.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/keras_core/getting_started_with_keras_core.ipynb",
"repo_id": "keras-io",
"token_count": 3984
} | 117 |
<jupyter_start><jupyter_text>Object Detection with KerasCV**Author:** [lukewood](https://twitter.com/luke_wood_ml), Ian Stenbit, Tirth Patel**Date created:** 2023/04/08**Last modified:** 2023/08/10**Description:** Train an object detection model with KerasCV. KerasCV offers a complete set of production grade APIs to solve object detectionproblems.These APIs include object-detection-specificdata augmentation techniques, Keras native COCO metrics, bounding box formatconversion utilities, visualization tools, pretrained object detection models,and everything you need to train your own state of the art object detectionmodels!Let's give KerasCV's object detection API a spin.<jupyter_code>!pip install -q --upgrade keras-cv
!pip install -q --upgrade keras # Upgrade to Keras 3.
import os
os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"]
from tensorflow import data as tf_data
import tensorflow_datasets as tfds
import keras
import keras_cv
import numpy as np
from keras_cv import bounding_box
import os
from keras_cv import visualization
import tqdm<jupyter_output><empty_output><jupyter_text>Object detection introductionObject detection is the process of identifying, classifying,and localizing objects within a given image. Typically, your inputs areimages, and your labels are bounding boxes with optional classlabels.Object detection can be thought of as an extension of classification, howeverinstead of one class label for the image, you must detect and localize anarbitrary number of classes.**For example:**The data for the above image may look something like this:```pythonimage = [height, width, 3]bounding_boxes = { "classes": [0], 0 is an arbitrary class ID representing "cat" "boxes": [[0.25, 0.4, .15, .1]] bounding box is in "rel_xywh" format so 0.25 represents the start of the bounding box 25% of the way across the image. The .15 represents that the width is 15% of the image width.}```Since the inception of [*You Only Look Once*](https://arxiv.org/abs/1506.02640)(aka YOLO),object detection has primarily been solved using deep learning.Most deep learning architectures do this by cleverly framing the object detectionproblem as a combination of many small classification problems andmany regression problems.More specifically, this is done by generating many anchor boxes of varyingshapes and sizes across the input images and assigning them each a class label,as well as `x`, `y`, `width` and `height` offsets.The model is trained to predict the class labels of each box, as well as the`x`, `y`, `width`, and `height` offsets of each box that is predicted to be anobject.**Visualization of some sample anchor boxes**:Objection detection is a technically complex problem but luckily we offer abulletproof approach to getting great results.Let's do this! Perform detections with a pretrained modelThe highest level API in the KerasCV Object Detection API is the `keras_cv.models` API.This API includes fully pretrained object detection models, such as`keras_cv.models.YOLOV8Detector`.Let's get started by constructing a YOLOV8Detector pretrained on the `pascalvoc`dataset.<jupyter_code>pretrained_model = keras_cv.models.YOLOV8Detector.from_preset(
"yolo_v8_m_pascalvoc", bounding_box_format="xywh"
)<jupyter_output><empty_output><jupyter_text>Notice the `bounding_box_format` argument?Recall in the section above, the format of bounding boxes:```bounding_boxes = { "classes": [num_boxes], "boxes": [num_boxes, 4]}```This argument describes *exactly* what format the values in the `"boxes"`field of the label dictionary take in your pipeline.For example, a box in `xywh` format with its top left corner at the coordinates(100, 100) with a width of 55 and a height of 70 would be represented by:```[100, 100, 55, 75]```or equivalently in `xyxy` format:```[100, 100, 155, 175]```While this may seem simple, it is a critical piece of the KerasCV objectdetection API!Every component that processes bounding boxes requires a`bounding_box_format` argument.You can read more aboutKerasCV bounding box formats [in the API docs](https://keras.io/api/keras_cv/bounding_box/formats/).This is done because there is no one correct format for bounding boxes!Components in different pipelines expect different formats, and so by requiringthem to be specified we ensure that our components remain readable, reusable,and clear.Box format conversion bugs are perhaps the most common bug surface in objectdetection pipelines - by requiring this parameter we mitigate against thesebugs (especially when combining code from many sources).Next let's load an image:<jupyter_code>filepath = keras.utils.get_file(origin="https://i.imgur.com/gCNcJJI.jpg")
image = keras.utils.load_img(filepath)
image = np.array(image)
visualization.plot_image_gallery(
np.array([image]),
value_range=(0, 255),
rows=1,
cols=1,
scale=5,
)<jupyter_output><empty_output><jupyter_text>To use the `YOLOV8Detector` architecture with a ResNet50 backbone, you'll need toresize your image to a size that is divisible by 64. This is to ensurecompatibility with the number of downscaling operations done by the convolutionlayers in the ResNet.If the resize operation distortsthe input's aspect ratio, the model will perform signficantly poorer. For thepretrained `"yolo_v8_m_pascalvoc"` preset we are using, the final`MeanAveragePrecision` on the `pascalvoc/2012` evaluation set drops to `0.15`from `0.38` when using a naive resizing operation.Additionally, if you crop to preserve the aspect ratio as you do in classificationyour model may entirely miss some bounding boxes. As such, when running inferenceon an object detection model we recommend the use of padding to the desired size,while resizing the longest size to match the aspect ratio.KerasCV makes resizing properly easy; simply pass `pad_to_aspect_ratio=True` toa `keras_cv.layers.Resizing` layer.This can be implemented in one line of code:<jupyter_code>inference_resizing = keras_cv.layers.Resizing(
640, 640, pad_to_aspect_ratio=True, bounding_box_format="xywh"
)<jupyter_output><empty_output><jupyter_text>This can be used as our inference preprocessing pipeline:<jupyter_code>image_batch = inference_resizing([image])<jupyter_output><empty_output><jupyter_text>`keras_cv.visualization.plot_bounding_box_gallery()` supports a `class_mapping`parameter to highlight what class each box was assigned to. Let's assemble aclass mapping now.<jupyter_code>class_ids = [
"Aeroplane",
"Bicycle",
"Bird",
"Boat",
"Bottle",
"Bus",
"Car",
"Cat",
"Chair",
"Cow",
"Dining Table",
"Dog",
"Horse",
"Motorbike",
"Person",
"Potted Plant",
"Sheep",
"Sofa",
"Train",
"Tvmonitor",
"Total",
]
class_mapping = dict(zip(range(len(class_ids)), class_ids))<jupyter_output><empty_output><jupyter_text>Just like any other `keras.Model` you can predict bounding boxes using the`model.predict()` API.<jupyter_code>y_pred = pretrained_model.predict(image_batch)
# y_pred is a bounding box Tensor:
# {"classes": ..., boxes": ...}
visualization.plot_bounding_box_gallery(
image_batch,
value_range=(0, 255),
rows=1,
cols=1,
y_pred=y_pred,
scale=5,
font_scale=0.7,
bounding_box_format="xywh",
class_mapping=class_mapping,
)<jupyter_output><empty_output><jupyter_text>In order to support this easy and intuitive inference workflow, KerasCVperforms non-max suppression inside of the `YOLOV8Detector` class.Non-max suppression is a traditional computing algorithm that solves the problemof a model detecting multiple boxes for the same object.Non-max suppression is a highly configurable algorithm, and in most cases youwill want to customize the settings of your model's non-maxsuppression operation.This can be done by overriding to the `prediction_decoder` argument.To show this concept off, let's temporarily disable non-max suppression on ourYOLOV8Detector. This can be done by writing to the `prediction_decoder` attribute.<jupyter_code># The following NonMaxSuppression layer is equivalent to disabling the operation
prediction_decoder = keras_cv.layers.NonMaxSuppression(
bounding_box_format="xywh",
from_logits=True,
iou_threshold=1.0,
confidence_threshold=0.0,
)
pretrained_model = keras_cv.models.YOLOV8Detector.from_preset(
"yolo_v8_m_pascalvoc",
bounding_box_format="xywh",
prediction_decoder=prediction_decoder,
)
y_pred = pretrained_model.predict(image_batch)
visualization.plot_bounding_box_gallery(
image_batch,
value_range=(0, 255),
rows=1,
cols=1,
y_pred=y_pred,
scale=5,
font_scale=0.7,
bounding_box_format="xywh",
class_mapping=class_mapping,
)<jupyter_output><empty_output><jupyter_text>Next, let's re-configure `keras_cv.layers.NonMaxSuppression` for ouruse case!In this case, we will tune the `iou_threshold` to `0.2`, and the`confidence_threshold` to `0.7`.Raising the `confidence_threshold` will cause the model to only output boxesthat have a higher confidence score. `iou_threshold` controls the threshold ofintersection over union (IoU) that two boxes must have in order for one to bepruned out.[More information on these parameters may be found in the TensorFlow API docs](https://www.tensorflow.org/api_docs/python/tf/image/combined_non_max_suppression)<jupyter_code>prediction_decoder = keras_cv.layers.NonMaxSuppression(
bounding_box_format="xywh",
from_logits=True,
# Decrease the required threshold to make predictions get pruned out
iou_threshold=0.2,
# Tune confidence threshold for predictions to pass NMS
confidence_threshold=0.7,
)
pretrained_model = keras_cv.models.YOLOV8Detector.from_preset(
"yolo_v8_m_pascalvoc",
bounding_box_format="xywh",
prediction_decoder=prediction_decoder,
)
y_pred = pretrained_model.predict(image_batch)
visualization.plot_bounding_box_gallery(
image_batch,
value_range=(0, 255),
rows=1,
cols=1,
y_pred=y_pred,
scale=5,
font_scale=0.7,
bounding_box_format="xywh",
class_mapping=class_mapping,
)<jupyter_output><empty_output><jupyter_text>That looks a lot better! Train a custom object detection modelWhether you're an object detection amateur or a well seasoned veteran, assemblingan object detection pipeline from scratch is a massive undertaking.Luckily, all KerasCV object detection APIs are built as modular components.Whether you need a complete pipeline, just an object detection model, or evenjust a conversion utility to transform your boxes from `xywh` format to `xyxy`,KerasCV has you covered.In this guide, we'll assemble a full training pipeline for a KerasCV objectdetection model. This includes data loading, augmentation, metric evaluation,and inference!To get started, let's sort out all of our imports and define globalconfiguration parameters.<jupyter_code>BATCH_SIZE = 4<jupyter_output><empty_output><jupyter_text>Data loadingTo get started, let's discuss data loading and bounding box formatting.KerasCV has a predefined format for bounding boxes.To comply with this, youshould package your bounding boxes into a dictionary matching thespecification below:```bounding_boxes = { num_boxes may be a Ragged dimension 'boxes': Tensor(shape=[batch, num_boxes, 4]), 'classes': Tensor(shape=[batch, num_boxes])}````bounding_boxes['boxes']` contains the coordinates of your bounding box in a KerasCVsupported `bounding_box_format`.KerasCV requires a `bounding_box_format` argument in all components that processbounding boxes.This is done to maximize your ability to plug and play individual componentsinto their object detection pipelines, as well as to make code self-documentingacross object detection pipelines.To match the KerasCV API style, it is recommended that when writing acustom data loader, you also support a `bounding_box_format` argument.This makes it clear to those invoking your data loader what format the bounding boxesare in.In this example, we format our boxes to `xywh` format.For example:```pythontrain_ds, ds_info = your_data_loader.load( split='train', bounding_box_format='xywh', batch_size=8)```This clearly yields bounding boxes in the format `xywh`. You can read more aboutKerasCV bounding box formats [in the API docs](https://keras.io/api/keras_cv/bounding_box/formats/).Our data comes loaded into the format`{"images": images, "bounding_boxes": bounding_boxes}`. This format issupported in all KerasCV preprocessing components.Let's load some data and verify that the data looks as we expect it to.<jupyter_code>def visualize_dataset(inputs, value_range, rows, cols, bounding_box_format):
inputs = next(iter(inputs.take(1)))
images, bounding_boxes = inputs["images"], inputs["bounding_boxes"]
visualization.plot_bounding_box_gallery(
images,
value_range=value_range,
rows=rows,
cols=cols,
y_true=bounding_boxes,
scale=5,
font_scale=0.7,
bounding_box_format=bounding_box_format,
class_mapping=class_mapping,
)
def unpackage_raw_tfds_inputs(inputs, bounding_box_format):
image = inputs["image"]
boxes = keras_cv.bounding_box.convert_format(
inputs["objects"]["bbox"],
images=image,
source="rel_yxyx",
target=bounding_box_format,
)
bounding_boxes = {
"classes": inputs["objects"]["label"],
"boxes": boxes,
}
return {"images": image, "bounding_boxes": bounding_boxes}
def load_pascal_voc(split, dataset, bounding_box_format):
ds = tfds.load(dataset, split=split, with_info=False, shuffle_files=True)
ds = ds.map(
lambda x: unpackage_raw_tfds_inputs(x, bounding_box_format=bounding_box_format),
num_parallel_calls=tf_data.AUTOTUNE,
)
return ds
train_ds = load_pascal_voc(
split="train", dataset="voc/2007", bounding_box_format="xywh"
)
eval_ds = load_pascal_voc(split="test", dataset="voc/2007", bounding_box_format="xywh")
train_ds = train_ds.shuffle(BATCH_SIZE * 4)<jupyter_output><empty_output><jupyter_text>Next, let's batch our data.In KerasCV object detection tasks it is recommended thatusers use ragged batches of inputs.This is due to the fact that images may be of different sizes in PascalVOC,as well as the fact that there may be different numbers of bounding boxes perimage.To construct a ragged dataset in a `tf.data` pipeline, you can use the`ragged_batch()` method.<jupyter_code>train_ds = train_ds.ragged_batch(BATCH_SIZE, drop_remainder=True)
eval_ds = eval_ds.ragged_batch(BATCH_SIZE, drop_remainder=True)<jupyter_output><empty_output><jupyter_text>Let's make sure our dataset is following the format KerasCV expects.By using the `visualize_dataset()` function, you can visually verifythat your data is in the format that KerasCV expects. If the bounding boxesare not visible or are visible in the wrong locations that is a sign that yourdata is mis-formatted.<jupyter_code>visualize_dataset(
train_ds, bounding_box_format="xywh", value_range=(0, 255), rows=2, cols=2
)<jupyter_output><empty_output><jupyter_text>And for the eval set:<jupyter_code>visualize_dataset(
eval_ds,
bounding_box_format="xywh",
value_range=(0, 255),
rows=2,
cols=2,
# If you are not running your experiment on a local machine, you can also
# make `visualize_dataset()` dump the plot to a file using `path`:
# path="eval.png"
)<jupyter_output><empty_output><jupyter_text>Looks like everything is structured as expected.Now we can move on to constructing ourdata augmentation pipeline. Data augmentationOne of the most challenging tasks when constructing object detectionpipelines is data augmentation. Image augmentation techniques must be aware of the underlyingbounding boxes, and must update them accordingly.Luckily, KerasCV natively supports bounding box augmentation with its extensivelibraryof [data augmentation layers](https://keras.io/api/keras_cv/layers/preprocessing/).The code below loads the Pascal VOC dataset, and performs on-the-fly,bounding-box-friendly data augmentation inside a `tf.data` pipeline.<jupyter_code>augmenters = [
keras_cv.layers.RandomFlip(mode="horizontal", bounding_box_format="xywh"),
keras_cv.layers.JitteredResize(
target_size=(640, 640), scale_factor=(0.75, 1.3), bounding_box_format="xywh"
),
]
def create_augmenter_fn(augmenters):
def augmenter_fn(inputs):
for augmenter in augmenters:
inputs = augmenter(inputs)
return inputs
return augmenter_fn
augmenter_fn = create_augmenter_fn(augmenters)
train_ds = train_ds.map(augmenter_fn, num_parallel_calls=tf_data.AUTOTUNE)
visualize_dataset(
train_ds, bounding_box_format="xywh", value_range=(0, 255), rows=2, cols=2
)<jupyter_output><empty_output><jupyter_text>Great! We now have a bounding-box-friendly data augmentation pipeline.Let's format our evaluation dataset to match. Instead of using`JitteredResize`, let's use the deterministic `keras_cv.layers.Resizing()`layer.<jupyter_code>inference_resizing = keras_cv.layers.Resizing(
640, 640, bounding_box_format="xywh", pad_to_aspect_ratio=True
)
eval_ds = eval_ds.map(inference_resizing, num_parallel_calls=tf_data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Due to the fact that the resize operation differs between the train dataset, which uses `JitteredResize()` to resize images, and the inference dataset, which uses `layers.Resizing(pad_to_aspect_ratio=True)`, it is good practice to visualize both datasets:<jupyter_code>visualize_dataset(
eval_ds, bounding_box_format="xywh", value_range=(0, 255), rows=2, cols=2
)<jupyter_output><empty_output><jupyter_text>Finally, let's unpackage our inputs from the preprocessing dictionary, and prepare to feed the inputs into our model. In order to be TPU compatible, bounding box Tensors need to be `Dense` instead of `Ragged`.<jupyter_code>def dict_to_tuple(inputs):
return inputs["images"], bounding_box.to_dense(
inputs["bounding_boxes"], max_boxes=32
)
train_ds = train_ds.map(dict_to_tuple, num_parallel_calls=tf_data.AUTOTUNE)
eval_ds = eval_ds.map(dict_to_tuple, num_parallel_calls=tf_data.AUTOTUNE)
train_ds = train_ds.prefetch(tf_data.AUTOTUNE)
eval_ds = eval_ds.prefetch(tf_data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Optimizer: In this guide, we use a standard SGD optimizer and rely on the [`keras.callbacks.ReduceLROnPlateau`](https://keras.io/api/callbacks/reduce_lr_on_plateau/) callback to reduce the learning rate. You will always want to include a `global_clipnorm` when training object detection models. This is to remedy exploding gradient problems that frequently occur when training object detection models.<jupyter_code>base_lr = 0.005
# including a global_clipnorm is extremely important in object detection tasks
optimizer = keras.optimizers.SGD(
learning_rate=base_lr, momentum=0.9, global_clipnorm=10.0
)<jupyter_output><empty_output><jupyter_text>To achieve the best results on your dataset, you'll likely want to hand craft a `PiecewiseConstantDecay` learning rate schedule. While `PiecewiseConstantDecay` schedules tend to perform better, they don't translate between problems. Loss functions: You may not be familiar with the `"ciou"` loss. While not common in other models, this loss is sometimes used in the object detection world. In short, ["Complete IoU"](https://arxiv.org/abs/1911.08287) is a flavour of the Intersection over Union loss and is used due to its convergence properties. In KerasCV, you can use this loss simply by passing the string `"ciou"` to `compile()`. We also use standard binary crossentropy loss for the class head.<jupyter_code>pretrained_model.compile(
classification_loss="binary_crossentropy",
box_loss="ciou",
)<jupyter_output><empty_output><jupyter_text>Metric evaluation: The most popular object detection metrics are COCO metrics, which were published alongside the MSCOCO dataset. KerasCV provides an easy-to-use suite of COCO metrics under the `keras_cv.callbacks.PyCOCOCallback` symbol. Note that we use a Keras callback instead of a Keras metric to compute COCO metrics. This is because computing COCO metrics requires storing all of a model's predictions for the entire evaluation dataset in memory at once, which is impractical to do during training time.<jupyter_code>coco_metrics_callback = keras_cv.callbacks.PyCOCOCallback(
eval_ds.take(20), bounding_box_format="xywh"
)<jupyter_output><empty_output><jupyter_text>Our data pipeline is now complete! We can now move on to model creation and training. Model creation: Next, let's use the KerasCV API to construct an untrained YOLOV8Detector model. In this tutorial we use a pretrained ResNet50 backbone from the imagenet dataset. KerasCV makes it easy to construct a `YOLOV8Detector` with any of the KerasCV backbones. Simply use one of the presets for the architecture you'd like! For example:<jupyter_code>model = keras_cv.models.YOLOV8Detector.from_preset(
"resnet50_imagenet",
# For more info on supported bounding box formats, visit
# https://keras.io/api/keras_cv/bounding_box/
bounding_box_format="xywh",
num_classes=20,
)<jupyter_output><empty_output><jupyter_text>That is all it takes to construct a KerasCV YOLOv8. The YOLOv8 accepts tuples of dense image Tensors and bounding box dictionaries to `fit()` and `train_on_batch()`. This matches what we have constructed in our input pipeline above. Training our model: All that is left to do is train our model. KerasCV object detection models follow the standard Keras workflow, leveraging `compile()` and `fit()`. Let's compile our model:<jupyter_code>model.compile(
classification_loss="binary_crossentropy",
box_loss="ciou",
optimizer=optimizer,
)<jupyter_output><empty_output><jupyter_text>If you want to fully train the model, remove `.take(20)` from all dataset references (below and in the initialization of the metrics callback).<jupyter_code>model.fit(
train_ds.take(20),
    # Run for ~10-35 epochs to achieve good scores.
epochs=1,
callbacks=[coco_metrics_callback],
)<jupyter_output><empty_output><jupyter_text>Inference and plotting results: KerasCV makes object detection inference simple. `model.predict(images)` returns a tensor of bounding boxes. By default, `YOLOV8Detector.predict()` will perform a non max suppression operation for you. In this section, we will use a `keras_cv` provided preset:<jupyter_code>model = keras_cv.models.YOLOV8Detector.from_preset(
"yolo_v8_m_pascalvoc", bounding_box_format="xywh"
)<jupyter_output><empty_output><jupyter_text>Next, for convenience we construct a dataset with larger batches:<jupyter_code>visualization_ds = eval_ds.unbatch()
visualization_ds = visualization_ds.ragged_batch(16)
visualization_ds = visualization_ds.shuffle(8)<jupyter_output><empty_output><jupyter_text>Let's create a simple function to plot our inferences:<jupyter_code>def visualize_detections(model, dataset, bounding_box_format):
images, y_true = next(iter(dataset.take(1)))
y_pred = model.predict(images)
visualization.plot_bounding_box_gallery(
images,
value_range=(0, 255),
bounding_box_format=bounding_box_format,
y_true=y_true,
y_pred=y_pred,
scale=4,
rows=2,
cols=2,
show=True,
font_scale=0.7,
class_mapping=class_mapping,
)<jupyter_output><empty_output><jupyter_text>You may need to configure your NonMaxSuppression operation to achieve visually appealing results.<jupyter_code>model.prediction_decoder = keras_cv.layers.NonMaxSuppression(
bounding_box_format="xywh",
from_logits=True,
iou_threshold=0.5,
confidence_threshold=0.75,
)
visualize_detections(model, dataset=visualization_ds, bounding_box_format="xywh")<jupyter_output><empty_output><jupyter_text>Awesome! One final helpful pattern to be aware of is to visualize detections in a `keras.callbacks.Callback` to monitor training:<jupyter_code>class VisualizeDetections(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
visualize_detections(
self.model, bounding_box_format="xywh", dataset=visualization_ds
)<jupyter_output><empty_output><jupyter_text>Takeaways and next steps: KerasCV makes it easy to construct state-of-the-art object detection pipelines. In this guide, we started off by writing a data loader using the KerasCV bounding box specification. Following this, we assembled a production grade data augmentation pipeline using KerasCV preprocessing layers in <50 lines of code. KerasCV object detection components can be used independently, but also have deep integration with each other. KerasCV makes authoring production grade bounding box augmentation, model training, visualization, and metric evaluation easy. Some follow up exercises for the reader: - add additional augmentation techniques to improve model performance - tune the hyperparameters and data augmentation used to produce high quality results - train an object detection model on your own dataset. One last fun code snippet to showcase the power of KerasCV's API!<jupyter_code>stable_diffusion = keras_cv.models.StableDiffusionV2(512, 512)
images = stable_diffusion.text_to_image(
prompt="A zoomed out photograph of a cool looking cat. The cat stands in a beautiful forest",
negative_prompt="unrealistic, bad looking, malformed",
batch_size=4,
seed=1231,
)
encoded_predictions = model(images)
y_pred = model.decode_predictions(encoded_predictions, images)
visualization.plot_bounding_box_gallery(
images,
value_range=(0, 255),
y_pred=y_pred,
rows=2,
cols=2,
scale=5,
font_scale=0.7,
bounding_box_format="xywh",
class_mapping=class_mapping,
)<jupyter_output><empty_output> | keras-io/guides/ipynb/keras_cv/object_detection_keras_cv.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/keras_cv/object_detection_keras_cv.ipynb",
"repo_id": "keras-io",
"token_count": 8355
} | 118 |
<jupyter_start><jupyter_text>The Sequential model **Author:** [fchollet](https://twitter.com/fchollet) **Date created:** 2020/04/12 **Last modified:** 2023/06/25 **Description:** Complete guide to the Sequential model. Setup<jupyter_code>import keras
from keras import layers
from keras import ops<jupyter_output><empty_output><jupyter_text>When to use a Sequential model: A `Sequential` model is appropriate for **a plain stack of layers** where each layer has **exactly one input tensor and one output tensor**. Schematically, the following `Sequential` model:<jupyter_code># Define Sequential model with 3 layers
model = keras.Sequential(
[
layers.Dense(2, activation="relu", name="layer1"),
layers.Dense(3, activation="relu", name="layer2"),
layers.Dense(4, name="layer3"),
]
)
# Call model on a test input
x = ops.ones((3, 3))
y = model(x)<jupyter_output><empty_output><jupyter_text>is equivalent to this function:<jupyter_code># Create 3 layers
layer1 = layers.Dense(2, activation="relu", name="layer1")
layer2 = layers.Dense(3, activation="relu", name="layer2")
layer3 = layers.Dense(4, name="layer3")
# Call layers on a test input
x = ops.ones((3, 3))
y = layer3(layer2(layer1(x)))<jupyter_output><empty_output><jupyter_text>A Sequential model is **not appropriate** when: - Your model has multiple inputs or multiple outputs - Any of your layers has multiple inputs or multiple outputs - You need to do layer sharing - You want non-linear topology (e.g. a residual connection, a multi-branch model). Creating a Sequential model: You can create a Sequential model by passing a list of layers to the Sequential constructor:<jupyter_code>model = keras.Sequential(
[
layers.Dense(2, activation="relu"),
layers.Dense(3, activation="relu"),
layers.Dense(4),
]
)<jupyter_output><empty_output><jupyter_text>Its layers are accessible via the `layers` attribute:<jupyter_code>model.layers<jupyter_output><empty_output><jupyter_text>You can also create a Sequential model incrementally via the `add()` method:<jupyter_code>model = keras.Sequential()
model.add(layers.Dense(2, activation="relu"))
model.add(layers.Dense(3, activation="relu"))
model.add(layers.Dense(4))<jupyter_output><empty_output><jupyter_text>Note that there's also a corresponding `pop()` method to remove layers: a Sequential model behaves very much like a list of layers.<jupyter_code>model.pop()
print(len(model.layers)) # 2<jupyter_output><empty_output><jupyter_text>Also note that the Sequential constructor accepts a `name` argument, just like any layer or model in Keras. This is useful to annotate TensorBoard graphs with semantically meaningful names.<jupyter_code>model = keras.Sequential(name="my_sequential")
model.add(layers.Dense(2, activation="relu", name="layer1"))
model.add(layers.Dense(3, activation="relu", name="layer2"))
model.add(layers.Dense(4, name="layer3"))<jupyter_output><empty_output><jupyter_text>Specifying the input shape in advance: Generally, all layers in Keras need to know the shape of their inputs in order to be able to create their weights. So when you create a layer like this, initially, it has no weights:<jupyter_code>layer = layers.Dense(3)
layer.weights # Empty<jupyter_output><empty_output><jupyter_text>It creates its weights the first time it is called on an input, since the shape of the weights depends on the shape of the inputs:<jupyter_code># Call layer on a test input
x = ops.ones((1, 4))
y = layer(x)
layer.weights # Now it has weights, of shape (4, 3) and (3,)<jupyter_output><empty_output><jupyter_text>Naturally, this also applies to Sequential models. When you instantiate a Sequential model without an input shape, it isn't "built": it has no weights (and calling `model.weights` results in an error stating just this). The weights are created when the model first sees some input data:<jupyter_code>model = keras.Sequential(
[
layers.Dense(2, activation="relu"),
layers.Dense(3, activation="relu"),
layers.Dense(4),
]
) # No weights at this stage!
# At this point, you can't do this:
# model.weights
# You also can't do this:
# model.summary()
# Call the model on a test input
x = ops.ones((1, 4))
y = model(x)
print("Number of weights after calling the model:", len(model.weights)) # 6<jupyter_output><empty_output><jupyter_text>Once a model is "built", you can call its `summary()` method to display its contents:<jupyter_code>model.summary()<jupyter_output><empty_output><jupyter_text>However, it can be very useful when building a Sequential model incrementally to be able to display the summary of the model so far, including the current output shape. In this case, you should start your model by passing an `Input` object to your model, so that it knows its input shape from the start:<jupyter_code>model = keras.Sequential()
model.add(keras.Input(shape=(4,)))
model.add(layers.Dense(2, activation="relu"))
model.summary()<jupyter_output><empty_output><jupyter_text>Note that the `Input` object is not displayed as part of `model.layers`, since it isn't a layer:<jupyter_code>model.layers<jupyter_output><empty_output><jupyter_text>Models built with a predefined input shape like this always have weights (even before seeing any data) and always have a defined output shape. In general, it's a recommended best practice to always specify the input shape of a Sequential model in advance if you know what it is. A common debugging workflow: `add()` + `summary()`: When building a new Sequential architecture, it's useful to incrementally stack layers with `add()` and frequently print model summaries. For instance, this enables you to monitor how a stack of `Conv2D` and `MaxPooling2D` layers is downsampling image feature maps:<jupyter_code>model = keras.Sequential()
model.add(keras.Input(shape=(250, 250, 3))) # 250x250 RGB images
model.add(layers.Conv2D(32, 5, strides=2, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(3))
# Can you guess what the current output shape is at this point? Probably not.
# Let's just print it:
model.summary()
# The answer was: (40, 40, 32), so we can keep downsampling...
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(3))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(2))
# And now?
model.summary()
# Now that we have 4x4 feature maps, time to apply global max pooling.
model.add(layers.GlobalMaxPooling2D())
# Finally, we add a classification layer.
model.add(layers.Dense(10))<jupyter_output><empty_output><jupyter_text>Very practical, right? What to do once you have a model: Once your model architecture is ready, you will want to: - Train your model, evaluate it, and run inference. See our [guide to training & evaluation with the built-in loops](/guides/training_with_built_in_methods/) - Save your model to disk and restore it. See our [guide to serialization & saving](/guides/serialization_and_saving/). Feature extraction with a Sequential model: Once a Sequential model has been built, it behaves like a [Functional API model](/guides/functional_api/). This means that every layer has an `input` and `output` attribute. These attributes can be used to do neat things, like quickly creating a model that extracts the outputs of all intermediate layers in a Sequential model:<jupyter_code>initial_model = keras.Sequential(
[
keras.Input(shape=(250, 250, 3)),
layers.Conv2D(32, 5, strides=2, activation="relu"),
layers.Conv2D(32, 3, activation="relu"),
layers.Conv2D(32, 3, activation="relu"),
]
)
feature_extractor = keras.Model(
inputs=initial_model.inputs,
outputs=[layer.output for layer in initial_model.layers],
)
# Call feature extractor on test input.
x = ops.ones((1, 250, 250, 3))
features = feature_extractor(x)<jupyter_output><empty_output><jupyter_text>Here's a similar example that only extracts features from one layer:<jupyter_code>initial_model = keras.Sequential(
[
keras.Input(shape=(250, 250, 3)),
layers.Conv2D(32, 5, strides=2, activation="relu"),
layers.Conv2D(32, 3, activation="relu", name="my_intermediate_layer"),
layers.Conv2D(32, 3, activation="relu"),
]
)
feature_extractor = keras.Model(
inputs=initial_model.inputs,
outputs=initial_model.get_layer(name="my_intermediate_layer").output,
)
# Call feature extractor on test input.
x = ops.ones((1, 250, 250, 3))
features = feature_extractor(x)<jupyter_output><empty_output> | keras-io/guides/ipynb/sequential_model.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/sequential_model.ipynb",
"repo_id": "keras-io",
"token_count": 2868
} | 119 |
# Customizing Saving and Serialization
**Author:** Neel Kovelamudi<br>
**Date created:** 2023/03/15<br>
**Last modified:** 2023/03/15<br>
**Description:** A more advanced guide on customizing saving for your layers and models.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/customizing_saving_and_serialization.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/customizing_saving_and_serialization.py)
---
## Introduction
This guide covers advanced methods that can be customized in Keras saving. For most
users, the methods outlined in the primary
[Serialize, save, and export guide](https://keras.io/guides/serialization_and_saving)
are sufficient.
### APIs
We will cover the following APIs:
- `save_assets()` and `load_assets()`
- `save_own_variables()` and `load_own_variables()`
- `get_build_config()` and `build_from_config()`
- `get_compile_config()` and `compile_from_config()`
When restoring a model, these get executed in the following order:
- `build_from_config()`
- `compile_from_config()`
- `load_own_variables()`
- `load_assets()`
---
## Setup
```python
import os
import numpy as np
import keras
```
---
## State saving customization
These methods determine how the state of your model's layers is saved when calling
`model.save()`. You can override them to take full control of the state saving process.
### `save_own_variables()` and `load_own_variables()`
These methods save and load the state variables of the layer when `model.save()` and
`keras.models.load_model()` are called, respectively. By default, the state variables
saved and loaded are the weights of the layer (both trainable and non-trainable). Here is
the default implementation of `save_own_variables()`:
```python
def save_own_variables(self, store):
all_vars = self._trainable_weights + self._non_trainable_weights
for i, v in enumerate(all_vars):
store[f"{i}"] = v.numpy()
```
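The default `load_own_variables()` mirrors this: it reads each entry back from the
store and assigns it to the corresponding weight. Roughly (a simplified sketch, not
the exact Keras source, which adds extra error checking):
```python
def load_own_variables(self, store):
    all_vars = self._trainable_weights + self._non_trainable_weights
    for i, v in enumerate(all_vars):
        v.assign(store[f"{i}"])
```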
The store used by these methods is a dictionary that can be populated with the layer
variables. Let's take a look at an example customizing this.
**Example:**
```python
@keras.utils.register_keras_serializable(package="my_custom_package")
class LayerWithCustomVariable(keras.layers.Dense):
def __init__(self, units, **kwargs):
super().__init__(units, **kwargs)
self.my_variable = keras.Variable(
np.random.random((units,)), name="my_variable", dtype="float32"
)
def save_own_variables(self, store):
super().save_own_variables(store)
# Stores the value of the variable upon saving
store["variables"] = self.my_variable.numpy()
def load_own_variables(self, store):
# Assigns the value of the variable upon loading
self.my_variable.assign(store["variables"])
# Load the remaining weights
for i, v in enumerate(self.weights):
v.assign(store[f"{i}"])
# Note: You must specify how all variables (including layer weights)
        # are loaded in `load_own_variables()`.
def call(self, inputs):
dense_out = super().call(inputs)
return dense_out + self.my_variable
model = keras.Sequential([LayerWithCustomVariable(1)])
ref_input = np.random.random((8, 10))
ref_output = np.random.random((8, 10))
model.compile(optimizer="adam", loss="mean_squared_error")
model.fit(ref_input, ref_output)
model.save("custom_vars_model.keras")
restored_model = keras.models.load_model("custom_vars_model.keras")
np.testing.assert_allclose(
model.layers[0].my_variable.numpy(),
restored_model.layers[0].my_variable.numpy(),
)
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 101ms/step - loss: 0.2908
```
</div>
### `save_assets()` and `load_assets()`
These methods can be added to your model class definition to store and load any
additional information that your model needs.
For example, NLP domain layers such as TextVectorization layers and IndexLookup layers
may need to store their associated vocabulary (or lookup table) in a text file upon
saving.
Let's take a look at the basics of this workflow with a simple file `assets.txt`.
**Example:**
```python
@keras.saving.register_keras_serializable(package="my_custom_package")
class LayerWithCustomAssets(keras.layers.Dense):
def __init__(self, vocab=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.vocab = vocab
def save_assets(self, inner_path):
# Writes the vocab (sentence) to text file at save time.
with open(os.path.join(inner_path, "vocabulary.txt"), "w") as f:
f.write(self.vocab)
def load_assets(self, inner_path):
# Reads the vocab (sentence) from text file at load time.
with open(os.path.join(inner_path, "vocabulary.txt"), "r") as f:
text = f.read()
self.vocab = text.replace("<unk>", "little")
model = keras.Sequential(
[LayerWithCustomAssets(vocab="Mary had a <unk> lamb.", units=5)]
)
x = np.random.random((10, 10))
y = model(x)
model.save("custom_assets_model.keras")
restored_model = keras.models.load_model("custom_assets_model.keras")
np.testing.assert_string_equal(
restored_model.layers[0].vocab, "Mary had a little lamb."
)
```
---
## `build` and `compile` saving customization
### `get_build_config()` and `build_from_config()`
These methods work together to save the layer's built states and restore them upon
loading.
By default, this only includes a build config dictionary with the layer's input shape,
but overriding these methods can be used to include further Variables and Lookup Tables
that can be useful to restore for your built model.
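As a rough sketch, the default behavior amounts to storing the input shape at build
time and calling `build()` with it again at load time (attribute names below are
illustrative placeholders, not the actual Keras internals):
```python
def get_build_config(self):
    return {"input_shape": self._build_input_shape}
def build_from_config(self, config):
    if config["input_shape"] is not None:
        self.build(config["input_shape"])
```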
**Example:**
```python
@keras.saving.register_keras_serializable(package="my_custom_package")
class LayerWithCustomBuild(keras.layers.Layer):
def __init__(self, units=32, **kwargs):
super().__init__(**kwargs)
self.units = units
def call(self, inputs):
return keras.ops.matmul(inputs, self.w) + self.b
def get_config(self):
return dict(units=self.units, **super().get_config())
def build(self, input_shape, layer_init):
# Note the overriding of `build()` to add an extra argument.
# Therefore, we will need to manually call build with `layer_init` argument
# before the first execution of `call()`.
super().build(input_shape)
self._input_shape = input_shape
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer=layer_init,
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,),
initializer=layer_init,
trainable=True,
)
self.layer_init = layer_init
def get_build_config(self):
build_config = {
"layer_init": self.layer_init,
"input_shape": self._input_shape,
} # Stores our initializer for `build()`
return build_config
def build_from_config(self, config):
# Calls `build()` with the parameters at loading time
self.build(config["input_shape"], config["layer_init"])
custom_layer = LayerWithCustomBuild(units=16)
custom_layer.build(input_shape=(8,), layer_init="random_normal")
model = keras.Sequential(
[
custom_layer,
keras.layers.Dense(1, activation="sigmoid"),
]
)
x = np.random.random((16, 8))
y = model(x)
model.save("custom_build_model.keras")
restored_model = keras.models.load_model("custom_build_model.keras")
np.testing.assert_equal(restored_model.layers[0].layer_init, "random_normal")
np.testing.assert_equal(restored_model.built, True)
```
### `get_compile_config()` and `compile_from_config()`
These methods work together to save the information with which the model was compiled
(optimizers, losses, etc.) and restore and re-compile the model with this information.
Overriding these methods can be useful for compiling the restored model with custom
optimizers, custom losses, etc., as these will need to be deserialized prior to calling
`model.compile` in `compile_from_config()`.
Let's take a look at an example of this.
**Example:**
```python
@keras.saving.register_keras_serializable(package="my_custom_package")
def small_square_sum_loss(y_true, y_pred):
loss = keras.ops.square(y_pred - y_true)
loss = loss / 10.0
loss = keras.ops.sum(loss, axis=1)
return loss
@keras.saving.register_keras_serializable(package="my_custom_package")
def mean_pred(y_true, y_pred):
return keras.ops.mean(y_pred)
@keras.saving.register_keras_serializable(package="my_custom_package")
class ModelWithCustomCompile(keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense1 = keras.layers.Dense(8, activation="relu")
self.dense2 = keras.layers.Dense(4, activation="softmax")
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
def compile(self, optimizer, loss_fn, metrics):
super().compile(optimizer=optimizer, loss=loss_fn, metrics=metrics)
self.model_optimizer = optimizer
self.loss_fn = loss_fn
self.loss_metrics = metrics
def get_compile_config(self):
# These parameters will be serialized at saving time.
return {
"model_optimizer": self.model_optimizer,
"loss_fn": self.loss_fn,
"metric": self.loss_metrics,
}
def compile_from_config(self, config):
# Deserializes the compile parameters (important, since many are custom)
optimizer = keras.utils.deserialize_keras_object(config["model_optimizer"])
loss_fn = keras.utils.deserialize_keras_object(config["loss_fn"])
metrics = keras.utils.deserialize_keras_object(config["metric"])
# Calls compile with the deserialized parameters
self.compile(optimizer=optimizer, loss_fn=loss_fn, metrics=metrics)
model = ModelWithCustomCompile()
model.compile(
optimizer="SGD", loss_fn=small_square_sum_loss, metrics=["accuracy", mean_pred]
)
x = np.random.random((4, 8))
y = np.random.random((4,))
model.fit(x, y)
model.save("custom_compile_model.keras")
restored_model = keras.models.load_model("custom_compile_model.keras")
np.testing.assert_equal(model.model_optimizer, restored_model.model_optimizer)
np.testing.assert_equal(model.loss_fn, restored_model.loss_fn)
np.testing.assert_equal(model.loss_metrics, restored_model.loss_metrics)
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - accuracy: 0.0000e+00 - loss: 0.0627 - mean_metric_wrapper: 0.2500
```
</div>
---
## Conclusion
Using the methods learned in this tutorial allows for a wide variety of use cases,
allowing the saving and loading of complex models with exotic assets and state
elements. To recap:
- `save_own_variables` and `load_own_variables` determine how your states are saved
and loaded.
- `save_assets` and `load_assets` can be added to store and load any additional
information your model needs.
- `get_build_config` and `build_from_config` save and restore the model's built
states.
- `get_compile_config` and `compile_from_config` save and restore the model's
compiled states.
| keras-io/guides/md/customizing_saving_and_serialization.md/0 | {
"file_path": "keras-io/guides/md/customizing_saving_and_serialization.md",
"repo_id": "keras-io",
"token_count": 4252
} | 120 |
# Segment Anything in KerasCV!
**Author:** Tirth Patel, Ian Stenbit<br>
**Date created:** 2023/12/04<br>
**Last modified:** 2023/12/19<br>
**Description:** Segment anything using text, box, and points prompts in KerasCV.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_cv/segment_anything_in_keras_cv.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_cv/segment_anything_in_keras_cv.py)
---
## Overview
The Segment Anything Model (SAM) produces high quality object masks from input prompts
such as points or boxes, and it can be used to generate masks for all objects in an
image. It has been trained on a
[dataset](https://segment-anything.com/dataset/index.html) of 11 million images and 1.1
billion masks, and has strong zero-shot performance on a variety of segmentation tasks.
In this guide, we will show how to use KerasCV's implementation of the
[Segment Anything Model](https://github.com/facebookresearch/segment-anything)
and show how powerful TensorFlow's and JAX's performance boost is.
First, let's get all our dependencies and images for our demo.
```python
!pip install -Uq keras-cv
!pip install -Uq keras
```
```python
!wget -q https://raw.githubusercontent.com/facebookresearch/segment-anything/main/notebooks/images/truck.jpg
```
---
## Choose your backend
With Keras 3, you can choose to use your favorite backend!
```python
import os
os.environ["KERAS_BACKEND"] = "jax"
import timeit
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras import ops
import keras_cv
```
---
## Helper functions
Let's define some helper functions for visualizing the images, prompts, and the
segmentation results.
```python
def show_mask(mask, ax, random_color=False):
if random_color:
color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
else:
color = np.array([30 / 255, 144 / 255, 255 / 255, 0.6])
h, w = mask.shape[-2:]
mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
ax.imshow(mask_image)
def show_points(coords, labels, ax, marker_size=375):
pos_points = coords[labels == 1]
neg_points = coords[labels == 0]
ax.scatter(
pos_points[:, 0],
pos_points[:, 1],
color="green",
marker="*",
s=marker_size,
edgecolor="white",
linewidth=1.25,
)
ax.scatter(
neg_points[:, 0],
neg_points[:, 1],
color="red",
marker="*",
s=marker_size,
edgecolor="white",
linewidth=1.25,
)
def show_box(box, ax):
box = box.reshape(-1)
x0, y0 = box[0], box[1]
w, h = box[2] - box[0], box[3] - box[1]
ax.add_patch(
plt.Rectangle((x0, y0), w, h, edgecolor="green", facecolor=(0, 0, 0, 0), lw=2)
)
def inference_resizing(image, pad=True):
# Compute Preprocess Shape
image = ops.cast(image, dtype="float32")
old_h, old_w = image.shape[0], image.shape[1]
scale = 1024 * 1.0 / max(old_h, old_w)
new_h = old_h * scale
new_w = old_w * scale
preprocess_shape = int(new_h + 0.5), int(new_w + 0.5)
# Resize the image
image = ops.image.resize(image[None, ...], preprocess_shape)[0]
# Pad the shorter side
if pad:
pixel_mean = ops.array([123.675, 116.28, 103.53])
pixel_std = ops.array([58.395, 57.12, 57.375])
image = (image - pixel_mean) / pixel_std
h, w = image.shape[0], image.shape[1]
pad_h = 1024 - h
pad_w = 1024 - w
image = ops.pad(image, [(0, pad_h), (0, pad_w), (0, 0)])
# KerasCV now rescales the images and normalizes them.
# Just unnormalize such that when KerasCV normalizes them
# again, the padded values map to 0.
image = image * pixel_std + pixel_mean
return image
```
---
## Get the pretrained SAM model
We can initialize a trained SAM model using KerasCV's `from_preset` factory method. Here,
we use the huge ViT backbone trained on the SA-1B dataset (`sam_huge_sa1b`) for
high-quality segmentation masks. You can also use one of the `sam_large_sa1b` or
`sam_base_sa1b` for better performance (at the cost of decreasing quality of segmentation
masks).
```python
model = keras_cv.models.SegmentAnythingModel.from_preset("sam_huge_sa1b")
```
---
## Understanding Prompts
Segment Anything allows prompting an image using points, boxes, and masks:
1. Point prompts are the most basic of all: the model tries to guess the object given a
point on an image. The point can either be a foreground point (i.e. the desired
segmentation mask contains the point in it) or a backround point (i.e. the point lies
outside the desired mask).
2. Another way to prompt the model is using boxes. Given a bounding box, the model tries
to segment the object contained in it.
3. Finally, the model can also be prompted using a mask itself. This is useful, for
instance, to refine the borders of a previously predicted or known segmentation mask.
What makes the model incredibly powerful is the ability to combine the prompts above.
Point, box, and mask prompts can be combined in several different ways to achieve the
best result.
Let's see the semantics of passing these prompts to the Segment Anything model in
KerasCV. Input to the SAM model is a dictionary with keys:
1. `"images"`: A batch of images to segment. Must be of shape `(B, 1024, 1024, 3)`.
2. `"points"`: A batch of point prompts. Each point is an `(x, y)` coordinate originating
from the top-left corner of the image. In other words, each point is of the form `(r, c)`
where `r` and `c` are the row and column of the pixel in the image. Must be of shape `(B,
N, 2)`.
3. `"labels"`: A batch of labels for the given points. `1` represents foreground points
and `0` represents background points. Must be of shape `(B, N)`.
4. `"boxes"`: A batch of boxes. Note that the model only accepts one box per batch.
Hence, the expected shape is `(B, 1, 2, 2)`. Each box is a collection of 2 points: the
top left corner and the bottom right corner of the box. The points here follow the same
semantics as the point prompts. Here the `1` in the second dimension represents the
presence of box prompts. If the box prompts are missing, a placeholder input of shape
`(B, 0, 2, 2)` must be passed.
5. `"masks"`: A batch of masks. Just like box prompts, only one mask prompt per image is
allowed. The shape of the input mask must be `(B, 1, 256, 256, 1)` if they are present
and `(B, 0, 256, 256, 1)` for missing mask prompt.
Placeholder prompts are only required when calling the model directly (i.e.
`model(...)`). When calling the `predict` method, missing prompts can be omitted from the
input dictionary.
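For illustration, here is a minimal sketch of a fully populated input for a direct
`model(...)` call with a single point prompt. The values are placeholders; the shapes
follow the description above, and the zero-point/`-1`-label padding trick is explained
in the next section:
```python
B = 1
placeholder_inputs = {
    "images": np.zeros((B, 1024, 1024, 3), dtype="float32"),
    # One real foreground point plus one zero-point pad with label -1.
    "points": np.array([[[284.0, 213.5], [0.0, 0.0]]], dtype="float32"),  # (B, 2, 2)
    "labels": np.array([[1.0, -1.0]], dtype="float32"),  # (B, 2)
    # Placeholders for the missing box and mask prompts.
    "boxes": np.zeros((B, 0, 2, 2), dtype="float32"),
    "masks": np.zeros((B, 0, 256, 256, 1), dtype="float32"),
}
# outputs = model(placeholder_inputs)
```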
---
## Point prompts
First, let's segment an image using point prompts. We load the image and resize it to
shape `(1024, 1024)`, the image size the pretrained SAM model expects.
```python
# Load our image
image = np.array(keras.utils.load_img("truck.jpg"))
image = inference_resizing(image)
plt.figure(figsize=(10, 10))
plt.imshow(ops.convert_to_numpy(image) / 255.0)
plt.axis("on")
plt.show()
```

Next, we will define the point on the object we want to segment. Let's try to segment the
truck's window pane at coordinates `(284, 213)`.
```python
# Define the input point prompt
input_point = np.array([[284, 213.5]])
input_label = np.array([1])
plt.figure(figsize=(10, 10))
plt.imshow(ops.convert_to_numpy(image) / 255.0)
show_points(input_point, input_label, plt.gca())
plt.axis("on")
plt.show()
```

Now let's call the `predict` method of our model to get the segmentation masks.
**Note**: We don't call the model directly (`model(...)`) since placeholder prompts are
required to do so. Missing prompts are handled automatically by the predict method so we
call it instead. Also, when no box prompts are present, the points and labels need to be
padded with a zero point prompt and `-1` label prompt respectively. The cell below
demonstrates how this works.
```python
outputs = model.predict(
{
"images": image[np.newaxis, ...],
"points": np.concatenate(
[input_point[np.newaxis, ...], np.zeros((1, 1, 2))], axis=1
),
"labels": np.concatenate(
[input_label[np.newaxis, ...], np.full((1, 1), fill_value=-1)], axis=1
),
}
)
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 48s 48s/step
```
</div>
`SegmentAnythingModel.predict` returns two outputs. First are logits (segmentation masks)
of shape `(1, 4, 256, 256)` and the other are the IoU confidence scores (of shape `(1,
4)`) for each mask predicted. The pretrained SAM model predicts four masks: the first is
the best mask the model could come up with for the given prompts, and the other 3 are the
alternative masks which can be used in case the best prediction doesn't contain the
desired object. The user can choose whichever mask they prefer.
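For example, you could also pick a mask programmatically by its predicted IoU score
(a small sketch using the output keys that appear in the next cell; the first mask is
usually already the best one):
```python
best_idx = int(np.argmax(ops.convert_to_numpy(outputs["iou_pred"][0])))
best_mask = outputs["masks"][0][best_idx]
```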
Let's visualize the masks returned by the model!
```python
# Resize the mask to our image shape i.e. (1024, 1024)
mask = inference_resizing(outputs["masks"][0][0][..., None], pad=False)[..., 0]
# Convert the logits to a numpy array
# and convert the logits to a boolean mask
mask = ops.convert_to_numpy(mask) > 0.0
iou_score = ops.convert_to_numpy(outputs["iou_pred"][0][0])
plt.figure(figsize=(10, 10))
plt.imshow(ops.convert_to_numpy(image) / 255.0)
show_mask(mask, plt.gca())
show_points(input_point, input_label, plt.gca())
plt.title(f"IoU Score: {iou_score:.3f}", fontsize=18)
plt.axis("off")
plt.show()
```

As expected, the model returns a segmentation mask for the truck's window pane. But, our
point prompt can also mean a range of other things. For example, another possible mask
that contains our point is just the right side of the window pane or the whole truck.
Let's also visualize the other masks the model has predicted.
```python
fig, ax = plt.subplots(1, 3, figsize=(20, 60))
masks, scores = outputs["masks"][0][1:], outputs["iou_pred"][0][1:]
for i, (mask, score) in enumerate(zip(masks, scores)):
mask = inference_resizing(mask[..., None], pad=False)[..., 0]
mask, score = map(ops.convert_to_numpy, (mask, score))
mask = 1 * (mask > 0.0)
ax[i].imshow(ops.convert_to_numpy(image) / 255.0)
show_mask(mask, ax[i])
show_points(input_point, input_label, ax[i])
ax[i].set_title(f"Mask {i+1}, Score: {score:.3f}", fontsize=12)
ax[i].axis("off")
plt.show()
```

Nice! SAM was able to capture the ambiguity of our point prompt and also returned other
possible segmentation masks.
---
## Box Prompts
Now, let's see how we can prompt the model using boxes. The box is specified using two
points, the top-left corner and the bottom-right corner of the bounding box in xyxy
format. Let's prompt the model using a bounding box around the left front tyre of the
truck.
```python
# Let's specify the box
input_box = np.array([[240, 340], [400, 500]])
outputs = model.predict(
{"images": image[np.newaxis, ...], "boxes": input_box[np.newaxis, np.newaxis, ...]}
)
mask = inference_resizing(outputs["masks"][0][0][..., None], pad=False)[..., 0]
mask = ops.convert_to_numpy(mask) > 0.0
plt.figure(figsize=(10, 10))
plt.imshow(ops.convert_to_numpy(image) / 255.0)
show_mask(mask, plt.gca())
show_box(input_box, plt.gca())
plt.axis("off")
plt.show()
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 13s 13s/step
```
</div>

Boom! The model perfectly segments out the left front tyre in our bounding box.
---
## Combining prompts
To get the true potential of the model out, let's combine box and point prompts and see
what the model does.
```python
# Let's specify the box
input_box = np.array([[240, 340], [400, 500]])
# Let's specify the point and mark it background
input_point = np.array([[325, 425]])
input_label = np.array([0])
outputs = model.predict(
{
"images": image[np.newaxis, ...],
"points": input_point[np.newaxis, ...],
"labels": input_label[np.newaxis, ...],
"boxes": input_box[np.newaxis, np.newaxis, ...],
}
)
mask = inference_resizing(outputs["masks"][0][0][..., None], pad=False)[..., 0]
mask = ops.convert_to_numpy(mask) > 0.0
plt.figure(figsize=(10, 10))
plt.imshow(ops.convert_to_numpy(image) / 255.0)
show_mask(mask, plt.gca())
show_box(input_box, plt.gca())
show_points(input_point, input_label, plt.gca())
plt.axis("off")
plt.show()
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 16s 16s/step
```
</div>

Voila! The model understood that the object we wanted to exclude from our mask was the
rim of the tyre.
---
## Text prompts
Finally, let's see how text prompts can be used along with KerasCV's
`SegmentAnythingModel`.
For this demo, we will use the
[official Grounding DINO model](https://github.com/IDEA-Research/GroundingDINO).
Grounding DINO is a model that
takes as input a `(image, text)` pair and generates a bounding box around the object in
the `image` described by the `text`. You can refer to the
[paper](https://arxiv.org/abs/2303.05499) for more details on the implementation of the
model.
For this part of the demo, we will need to install the `groundingdino` package from
source:
```
pip install -U git+https://github.com/IDEA-Research/GroundingDINO.git
```
Then, we can install the pretrained model's weights and config:
```python
!wget -q https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth
!wget -q https://raw.githubusercontent.com/IDEA-Research/GroundingDINO/v0.1.0-alpha2/groundingdino/config/GroundingDINO_SwinT_OGC.py
```
```python
from groundingdino.util.inference import Model as GroundingDINO
CONFIG_PATH = "GroundingDINO_SwinT_OGC.py"
WEIGHTS_PATH = "groundingdino_swint_ogc.pth"
grounding_dino = GroundingDINO(CONFIG_PATH, WEIGHTS_PATH)
```
<div class="k-default-codeblock">
```
/home/tirthp/oss/virtualenvs/keras-io-dev/lib/python3.10/site-packages/torch/functional.py:504: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:3526.)
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
final text_encoder_type: bert-base-uncased
```
</div>
Let's load an image of a dog for this part!
```python
filepath = keras.utils.get_file(
origin="https://storage.googleapis.com/keras-cv/test-images/mountain-dog.jpeg"
)
image = np.array(keras.utils.load_img(filepath))
image = ops.convert_to_numpy(inference_resizing(image))
plt.figure(figsize=(10, 10))
plt.imshow(image / 255.0)
plt.axis("on")
plt.show()
```
<div class="k-default-codeblock">
```
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
```
</div>

We first predict the bounding box of the object we want to segment using the Grounding
DINO model. Then, we prompt the SAM model using the bounding box to get the segmentation
mask.
Let's try to segment out the harness of the dog. Change the image and text below to
segment whatever you want using text from your image!
```python
# Let's predict the bounding box for the harness of the dog
boxes = grounding_dino.predict_with_caption(image.astype(np.uint8), "harness")
boxes = np.array(boxes[0].xyxy)
outputs = model.predict(
{
"images": np.repeat(image[np.newaxis, ...], boxes.shape[0], axis=0),
"boxes": boxes.reshape(-1, 1, 2, 2),
},
batch_size=1,
)
```
<div class="k-default-codeblock">
```
/home/tirthp/oss/virtualenvs/keras-io-dev/lib/python3.10/site-packages/transformers/modeling_utils.py:942: FutureWarning: The `device` argument is deprecated and will be removed in v5 of Transformers.
warnings.warn(
/home/tirthp/oss/virtualenvs/keras-io-dev/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
warnings.warn(
/home/tirthp/oss/virtualenvs/keras-io-dev/lib/python3.10/site-packages/torch/utils/checkpoint.py:61: UserWarning: None of the inputs have requires_grad=True. Gradients will be None
warnings.warn(
1/1 ━━━━━━━━━━━━━━━━━━━━ 13s 13s/step
```
</div>
And that's it! We got a segmentation mask for our text prompt using the combination of
Grounding DINO + SAM! This is a very powerful technique to combine different models to
expand the applications!
Let's visualize the results.
```python
plt.figure(figsize=(10, 10))
plt.imshow(image / 255.0)
for mask in outputs["masks"]:
mask = inference_resizing(mask[0][..., None], pad=False)[..., 0]
mask = ops.convert_to_numpy(mask) > 0.0
show_mask(mask, plt.gca())
show_box(boxes, plt.gca())
plt.axis("off")
plt.show()
```
<div class="k-default-codeblock">
```
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
```
</div>

---
## Optimizing SAM
You can use `mixed_float16` or `bfloat16` dtype policies to gain huge speedups and memory
optimizations at relatively low precision loss.
```python
# Load our image
image = np.array(keras.utils.load_img("truck.jpg"))
image = inference_resizing(image)
# Specify the prompt
input_box = np.array([[240, 340], [400, 500]])
# Let's first see how fast the model is with float32 dtype
time_taken = timeit.repeat(
'model.predict({"images": image[np.newaxis, ...], "boxes": input_box[np.newaxis, np.newaxis, ...]}, verbose=False)',
repeat=3,
number=3,
globals=globals(),
)
print(f"Time taken with float32 dtype: {min(time_taken) / 3:.10f}s")
# Set the dtype policy in Keras
keras.mixed_precision.set_global_policy("mixed_float16")
model = keras_cv.models.SegmentAnythingModel.from_preset("sam_huge_sa1b")
time_taken = timeit.repeat(
'model.predict({"images": image[np.newaxis, ...], "boxes": input_box[np.newaxis, np.newaxis, ...]}, verbose=False)',
repeat=3,
number=3,
globals=globals(),
)
print(f"Time taken with float16 dtype: {min(time_taken) / 3:.10f}s")
```
<div class="k-default-codeblock">
```
Time taken with float32 dtype: 0.5304666963s
Time taken with float16 dtype: 0.1586400040s
```
</div>
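The `bfloat16` policy works the same way; the only change is the policy name passed to
`set_global_policy` (a small sketch, most useful on hardware with native bfloat16
support, such as TPUs):
```python
keras.mixed_precision.set_global_policy("bfloat16")
model = keras_cv.models.SegmentAnythingModel.from_preset("sam_huge_sa1b")
```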
Here's a comparison of KerasCV's implementation with the original PyTorch
implementation!

The script used to generate the benchmarks is present
[here](https://github.com/tirthasheshpatel/segment_anything_keras/blob/main/Segment_Anything_Benchmarks.ipynb).
---
## Conclusion
KerasCV's `SegmentAnythingModel` supports a variety of applications and, with the help of
Keras 3, enables running the model on TensorFlow, JAX, and PyTorch! With the help of XLA
in JAX and TensorFlow, the model runs several times faster than the original
implementation. Moreover, using Keras's mixed precision support helps optimize memory use
and computation time with just one line of code!
For more advanced uses, check out the
[Automatic Mask Generator demo](https://github.com/tirthasheshpatel/segment_anything_keras/blob/main/Segment_Anything_Automatic_Mask_Generator_Demo.ipynb).
| keras-io/guides/md/keras_cv/segment_anything_in_keras_cv.md/0 | {
"file_path": "keras-io/guides/md/keras_cv/segment_anything_in_keras_cv.md",
"repo_id": "keras-io",
"token_count": 7469
} | 121 |
# Training & evaluation with the built-in methods
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2019/03/01<br>
**Last modified:** 2023/06/25<br>
**Description:** Complete guide to training & evaluation with `fit()` and `evaluate()`.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/training_with_built_in_methods.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/training_with_built_in_methods.py)
---
## Setup
```python
# We import torch & TF so as to use torch Dataloaders & tf.data.Datasets.
import torch
import tensorflow as tf
import os
import numpy as np
import keras
from keras import layers
from keras import ops
```
---
## Introduction
This guide covers training, evaluation, and prediction (inference) models
when using built-in APIs for training & validation (such as `Model.fit()`,
`Model.evaluate()` and `Model.predict()`).
If you are interested in leveraging `fit()` while specifying your
own training step function, see the guides on customizing what happens in `fit()`:
- [Writing a custom train step with TensorFlow](/guides/custom_train_step_in_tensorflow/)
- [Writing a custom train step with JAX](/guides/custom_train_step_in_jax/)
- [Writing a custom train step with PyTorch](/guides/custom_train_step_in_torch/)
If you are interested in writing your own training & evaluation loops from
scratch, see the guides on writing training loops:
- [Writing a training loop with TensorFlow](/guides/writing_a_custom_training_loop_in_tensorflow/)
- [Writing a training loop with JAX](/guides/writing_a_custom_training_loop_in_jax/)
- [Writing a training loop with PyTorch](/guides/writing_a_custom_training_loop_in_torch/)
In general, whether you are using built-in loops or writing your own, model training &
evaluation works strictly in the same way across every kind of Keras model --
Sequential models, models built with the Functional API, and models written from
scratch via model subclassing.
---
## API overview: a first end-to-end example
When passing data to the built-in training loops of a model, you should either use:
- NumPy arrays (if your data is small and fits in memory)
- Subclasses of `keras.utils.PyDataset`
- `tf.data.Dataset` objects
- PyTorch `DataLoader` instances
In the next few paragraphs, we'll use the MNIST dataset as NumPy arrays, in
order to demonstrate how to use optimizers, losses, and metrics. Afterwards, we'll
take a close look at each of the other options.
Let's consider the following model (here, we build it with the Functional API, but it
could be a Sequential model or a subclassed model as well):
```python
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
```
Here's what the typical end-to-end workflow looks like, consisting of:
- Training
- Validation on a holdout set generated from the original training data
- Evaluation on the test data
We'll use MNIST data for this example.
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Preprocess the data (these are NumPy arrays)
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
y_train = y_train.astype("float32")
y_test = y_test.astype("float32")
# Reserve 10,000 samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
```
We specify the training configuration (optimizer, loss, metrics):
```python
model.compile(
optimizer=keras.optimizers.RMSprop(), # Optimizer
# Loss function to minimize
loss=keras.losses.SparseCategoricalCrossentropy(),
# List of metrics to monitor
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
```
We call `fit()`, which will train the model by slicing the data into "batches" of size
`batch_size`, and repeatedly iterating over the entire dataset for a given number of
`epochs`.
```python
print("Fit model on training data")
history = model.fit(
x_train,
y_train,
batch_size=64,
epochs=2,
# We pass some validation for
# monitoring validation loss and metrics
# at the end of each epoch
validation_data=(x_val, y_val),
)
```
<div class="k-default-codeblock">
```
Fit model on training data
Epoch 1/2
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 955us/step - loss: 0.5740 - sparse_categorical_accuracy: 0.8368 - val_loss: 0.2040 - val_sparse_categorical_accuracy: 0.9420
Epoch 2/2
782/782 ━━━━━━━━━━━━━━━━━━━━ 0s 390us/step - loss: 0.1745 - sparse_categorical_accuracy: 0.9492 - val_loss: 0.1415 - val_sparse_categorical_accuracy: 0.9581
```
</div>
The returned `history` object holds a record of the loss values and metric values
during training:
```python
print(history.history)
```
<div class="k-default-codeblock">
```
{'loss': [0.34448376297950745, 0.16419583559036255], 'sparse_categorical_accuracy': [0.9008600115776062, 0.9509199857711792], 'val_loss': [0.20404714345932007, 0.14145156741142273], 'val_sparse_categorical_accuracy': [0.9419999718666077, 0.9581000208854675]}
```
</div>
We evaluate the model on the test data via `evaluate()`:
```python
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = model.evaluate(x_test, y_test, batch_size=128)
print("test loss, test acc:", results)
# Generate predictions (probabilities -- the output of the last layer)
# on new data using `predict`
print("Generate predictions for 3 samples")
predictions = model.predict(x_test[:3])
print("predictions shape:", predictions.shape)
```
<div class="k-default-codeblock">
```
Evaluate on test data
79/79 ━━━━━━━━━━━━━━━━━━━━ 0s 271us/step - loss: 0.1670 - sparse_categorical_accuracy: 0.9489
test loss, test acc: [0.1484374850988388, 0.9550999999046326]
Generate predictions for 3 samples
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step
predictions shape: (3, 10)
```
</div>
Now, let's review each piece of this workflow in detail.
---
## The `compile()` method: specifying a loss, metrics, and an optimizer
To train a model with `fit()`, you need to specify a loss function, an optimizer, and
optionally, some metrics to monitor.
You pass these to the model as arguments to the `compile()` method:
```python
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
```
The `metrics` argument should be a list -- your model can have any number of metrics.
If your model has multiple outputs, you can specify different losses and metrics for
each output, and you can modulate the contribution of each output to the total loss of
the model. You will find more details about this in the **Passing data to multi-input,
multi-output models** section.
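As a quick preview, this is done by passing dicts keyed by output name to `compile()`.
The output names below are hypothetical; see that section for a complete, runnable
example:
```python
# Hypothetical model with two outputs named "score_output" and "class_output".
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={
        "score_output": keras.losses.MeanSquaredError(),
        "class_output": keras.losses.CategoricalCrossentropy(),
    },
    # Modulate each output's contribution to the total loss.
    loss_weights={"score_output": 2.0, "class_output": 1.0},
    metrics={"class_output": [keras.metrics.CategoricalAccuracy()]},
)
```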
Note that if you're satisfied with the default settings, in many cases the optimizer,
loss, and metrics can be specified via string identifiers as a shortcut:
```python
model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
```
For later reuse, let's put our model definition and compile step in functions; we will
call them several times across different examples in this guide.
```python
def get_uncompiled_model():
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
def get_compiled_model():
model = get_uncompiled_model()
model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
return model
```
### Many built-in optimizers, losses, and metrics are available
In general, you won't have to create your own losses, metrics, or optimizers
from scratch, because what you need is likely to be already part of the Keras API:
Optimizers:
- `SGD()` (with or without momentum)
- `RMSprop()`
- `Adam()`
- etc.
Losses:
- `MeanSquaredError()`
- `KLDivergence()`
- `CosineSimilarity()`
- etc.
Metrics:
- `AUC()`
- `Precision()`
- `Recall()`
- etc.
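Each of these is a regular class that you instantiate with its own hyperparameters and
pass to `compile()`. For example (the argument values below are arbitrary and purely
illustrative):
```python
model.compile(
    # "SGD with momentum" just means passing a non-zero `momentum` argument.
    optimizer=keras.optimizers.SGD(learning_rate=1e-3, momentum=0.9),
    loss=keras.losses.MeanSquaredError(),
    metrics=[keras.metrics.AUC(), keras.metrics.Precision(), keras.metrics.Recall()],
)
```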
### Custom losses
If you need to create a custom loss, Keras provides three ways to do so.
The first method involves creating a function that accepts inputs `y_true` and
`y_pred`. The following example shows a loss function that computes the mean squared
error between the real data and the predictions:
```python
def custom_mean_squared_error(y_true, y_pred):
return ops.mean(ops.square(y_true - y_pred), axis=-1)
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=custom_mean_squared_error)
# We need to one-hot encode the labels to use MSE
y_train_one_hot = ops.one_hot(y_train, num_classes=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)
```
<div class="k-default-codeblock">
```
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 525us/step - loss: 0.0277
<keras.src.callbacks.history.History at 0x2e5dde350>
```
</div>
If you need a loss function that takes in parameters beside `y_true` and `y_pred`, you
can subclass the `keras.losses.Loss` class and implement the following two methods:
- `__init__(self)`: accept parameters to pass during the call of your loss function
- `call(self, y_true, y_pred)`: use the targets (y_true) and the model predictions
(y_pred) to compute the model's loss
Let's say you want to use mean squared error, but with an added term that
will de-incentivize prediction values far from 0.5 (we assume that the categorical
targets are one-hot encoded and take values between 0 and 1). This
creates an incentive for the model not to be too confident, which may help
reduce overfitting (we won't know if it works until we try!).
Here's how you would do it:
```python
class CustomMSE(keras.losses.Loss):
def __init__(self, regularization_factor=0.1, name="custom_mse"):
super().__init__(name=name)
self.regularization_factor = regularization_factor
def call(self, y_true, y_pred):
mse = ops.mean(ops.square(y_true - y_pred), axis=-1)
reg = ops.mean(ops.square(0.5 - y_pred), axis=-1)
return mse + reg * self.regularization_factor
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=CustomMSE())
y_train_one_hot = ops.one_hot(y_train, num_classes=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)
```
<div class="k-default-codeblock">
```
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 532us/step - loss: 0.0492
<keras.src.callbacks.history.History at 0x2e5d0d360>
```
</div>
### Custom metrics
If you need a metric that isn't part of the API, you can easily create custom metrics
by subclassing the `keras.metrics.Metric` class. You will need to implement 4
methods:
- `__init__(self)`, in which you will create state variables for your metric.
- `update_state(self, y_true, y_pred, sample_weight=None)`, which uses the targets
y_true and the model predictions y_pred to update the state variables.
- `result(self)`, which uses the state variables to compute the final results.
- `reset_state(self)`, which reinitializes the state of the metric.
State update and results computation are kept separate (in `update_state()` and
`result()`, respectively) because in some cases, the results computation might be very
expensive and would only be done periodically.
Here's a simple example showing how to implement a `CategoricalTruePositives` metric
that counts how many samples were correctly classified as belonging to a given class:
```python
class CategoricalTruePositives(keras.metrics.Metric):
def __init__(self, name="categorical_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_variable(
shape=(), name="ctp", initializer="zeros"
)
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = ops.reshape(ops.argmax(y_pred, axis=1), (-1, 1))
values = ops.cast(y_true, "int32") == ops.cast(y_pred, "int32")
values = ops.cast(values, "float32")
if sample_weight is not None:
sample_weight = ops.cast(sample_weight, "float32")
values = ops.multiply(values, sample_weight)
self.true_positives.assign_add(ops.sum(values))
def result(self):
return self.true_positives.value
def reset_state(self):
# The state of the metric will be reset at the start of each epoch.
self.true_positives.assign(0.0)
model = get_uncompiled_model()
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[CategoricalTruePositives()],
)
model.fit(x_train, y_train, batch_size=64, epochs=3)
```
<div class="k-default-codeblock">
```
Epoch 1/3
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 568us/step - categorical_true_positives: 180967.9219 - loss: 0.5876
Epoch 2/3
782/782 ━━━━━━━━━━━━━━━━━━━━ 0s 377us/step - categorical_true_positives: 182141.9375 - loss: 0.1733
Epoch 3/3
782/782 ━━━━━━━━━━━━━━━━━━━━ 0s 377us/step - categorical_true_positives: 182303.5312 - loss: 0.1180
<keras.src.callbacks.history.History at 0x2e5f02d10>
```
</div>
### Handling losses and metrics that don't fit the standard signature
The overwhelming majority of losses and metrics can be computed from `y_true` and
`y_pred`, where `y_pred` is an output of your model -- but not all of them. For
instance, a regularization loss may only require the activation of a layer (there are
no targets in this case), and this activation may not be a model output.
In such cases, you can call `self.add_loss(loss_value)` from inside the call method of
a custom layer. Losses added in this way get added to the "main" loss during training
(the one passed to `compile()`). Here's a simple example that adds activity
regularization (note that activity regularization is built into all Keras layers --
this layer is just for the sake of providing a concrete example):
```python
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(ops.sum(inputs) * 0.1)
return inputs # Pass-through layer.
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
# The displayed loss will be much higher than before
# due to the regularization component.
model.fit(x_train, y_train, batch_size=64, epochs=1)
```
<div class="k-default-codeblock">
```
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 505us/step - loss: 3.4083
<keras.src.callbacks.history.History at 0x2e60226b0>
```
</div>
Note that when you pass losses via `add_loss()`, it becomes possible to call
`compile()` without a loss function, since the model already has a loss to minimize.
Consider the following `LogisticEndpoint` layer: it takes as inputs
targets & logits, and it tracks a crossentropy loss via `add_loss()`.
```python
class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super().__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
def call(self, targets, logits, sample_weights=None):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weights)
self.add_loss(loss)
# Return the inference-time prediction tensor (for `.predict()`).
return ops.softmax(logits)
```
You can use it in a model with two inputs (input data & targets), compiled without a
`loss` argument, like this:
```python
inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
predictions = LogisticEndpoint(name="predictions")(targets, logits)
model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam") # No loss argument!
data = {
"inputs": np.random.random((3, 3)),
"targets": np.random.random((3, 10)),
}
model.fit(data)
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 89ms/step - loss: 0.6982
<keras.src.callbacks.history.History at 0x2e5cc91e0>
```
</div>
For more information about training multi-input models, see the section **Passing data
to multi-input, multi-output models**.
### Automatically setting apart a validation holdout set
In the first end-to-end example you saw, we used the `validation_data` argument to pass
a tuple of NumPy arrays `(x_val, y_val)` to the model for evaluating a validation loss
and validation metrics at the end of each epoch.
Here's another option: the argument `validation_split` allows you to automatically
reserve part of your training data for validation. The argument value represents the
fraction of the data to be reserved for validation, so it should be set to a number
higher than 0 and lower than 1. For instance, `validation_split=0.2` means "use 20% of
the data for validation", and `validation_split=0.6` means "use 60% of the data for
validation".
The way the validation is computed is by taking the last x% samples of the arrays
received by the `fit()` call, before any shuffling.
Note that you can only use `validation_split` when training with NumPy data.
```python
model = get_compiled_model()
model.fit(x_train, y_train, batch_size=64, validation_split=0.2, epochs=1)
```
<div class="k-default-codeblock">
```
625/625 ━━━━━━━━━━━━━━━━━━━━ 1s 563us/step - loss: 0.6161 - sparse_categorical_accuracy: 0.8259 - val_loss: 0.2379 - val_sparse_categorical_accuracy: 0.9302
<keras.src.callbacks.history.History at 0x2e6007610>
```
</div>
---
## Training & evaluation using `tf.data` Datasets
In the past few paragraphs, you've seen how to handle losses, metrics, and optimizers,
and you've seen how to use the `validation_data` and `validation_split` arguments in
`fit()`, when your data is passed as NumPy arrays.
Another option is to use an iterator-like object, such as a `tf.data.Dataset`, a
PyTorch `DataLoader`, or a Keras `PyDataset`. Let's take a look at the first of these.
The `tf.data` API is a set of utilities in TensorFlow 2.0 for loading and preprocessing
data in a way that's fast and scalable. For a complete guide about creating `Datasets`,
see the [tf.data documentation](https://www.tensorflow.org/guide/data).
**You can use `tf.data` to train your Keras
models regardless of the backend you're using --
whether it's JAX, PyTorch, or TensorFlow.**
You can pass a `Dataset` instance directly to the methods `fit()`, `evaluate()`, and
`predict()`:
```python
model = get_compiled_model()
# First, let's create a training Dataset instance.
# For the sake of our example, we'll use the same MNIST data as before.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Now we get a test dataset.
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(64)
# Since the dataset already takes care of batching,
# we don't pass a `batch_size` argument.
model.fit(train_dataset, epochs=3)
# You can also evaluate or predict on a dataset.
print("Evaluate")
result = model.evaluate(test_dataset)
dict(zip(model.metrics_names, result))
```
<div class="k-default-codeblock">
```
Epoch 1/3
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 688us/step - loss: 0.5631 - sparse_categorical_accuracy: 0.8458
Epoch 2/3
782/782 ━━━━━━━━━━━━━━━━━━━━ 0s 512us/step - loss: 0.1703 - sparse_categorical_accuracy: 0.9484
Epoch 3/3
782/782 ━━━━━━━━━━━━━━━━━━━━ 0s 506us/step - loss: 0.1187 - sparse_categorical_accuracy: 0.9640
Evaluate
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 622us/step - loss: 0.1380 - sparse_categorical_accuracy: 0.9582
{'loss': 0.11913617700338364, 'compile_metrics': 0.965399980545044}
```
</div>
Note that the Dataset is reset at the end of each epoch, so it can be reused for the
next epoch.
If you want to run training only on a specific number of batches from this Dataset, you
can pass the `steps_per_epoch` argument, which specifies how many training steps the
model should run using this Dataset before moving on to the next epoch.
```python
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Only use 100 batches per epoch (that's 64 * 100 samples)
model.fit(train_dataset, epochs=3, steps_per_epoch=100)
```
<div class="k-default-codeblock">
```
Epoch 1/3
100/100 ━━━━━━━━━━━━━━━━━━━━ 0s 508us/step - loss: 1.2000 - sparse_categorical_accuracy: 0.6822
Epoch 2/3
100/100 ━━━━━━━━━━━━━━━━━━━━ 0s 481us/step - loss: 0.4004 - sparse_categorical_accuracy: 0.8827
Epoch 3/3
100/100 ━━━━━━━━━━━━━━━━━━━━ 0s 471us/step - loss: 0.3546 - sparse_categorical_accuracy: 0.8968
<keras.src.callbacks.history.History at 0x2e64df400>
```
</div>
You can also pass a `Dataset` instance as the `validation_data` argument in `fit()`:
```python
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(train_dataset, epochs=1, validation_data=val_dataset)
```
<div class="k-default-codeblock">
```
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 837us/step - loss: 0.5569 - sparse_categorical_accuracy: 0.8508 - val_loss: 0.1711 - val_sparse_categorical_accuracy: 0.9527
<keras.src.callbacks.history.History at 0x2e641e920>
```
</div>
At the end of each epoch, the model will iterate over the validation dataset and
compute the validation loss and validation metrics.
If you want to run validation only on a specific number of batches from this dataset,
you can pass the `validation_steps` argument, which specifies how many validation
steps the model should run with the validation dataset before interrupting validation
and moving on to the next epoch:
```python
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(
train_dataset,
epochs=1,
# Only run validation using the first 10 batches of the dataset
# using the `validation_steps` argument
validation_data=val_dataset,
validation_steps=10,
)
```
<div class="k-default-codeblock">
```
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 771us/step - loss: 0.5562 - sparse_categorical_accuracy: 0.8436 - val_loss: 0.3345 - val_sparse_categorical_accuracy: 0.9062
<keras.src.callbacks.history.History at 0x2f9542e00>
```
</div>
Note that the validation dataset will be reset after each use (so that you will always
be evaluating on the same samples from epoch to epoch).
The argument `validation_split` (generating a holdout set from the training data) is
not supported when training from `Dataset` objects, since this feature requires the
ability to index the samples of the datasets, which is not possible in general with
the `Dataset` API.
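If you do need a holdout set when working with a `Dataset`, one workaround is to split it
yourself before batching. Here's a minimal sketch using `take()` and `skip()` -- the
10,000-sample holdout size is an arbitrary choice for illustration, not part of the example above:

```python
# Build an unbatched Dataset of (x, y) pairs, then carve out a validation split.
full_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
num_val_samples = 10000  # Arbitrary holdout size, for illustration only.
val_dataset = full_dataset.take(num_val_samples).batch(64)
train_dataset = full_dataset.skip(num_val_samples).shuffle(buffer_size=1024).batch(64)

model = get_compiled_model()
model.fit(train_dataset, validation_data=val_dataset, epochs=1)
```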
---
## Training & evaluation using `PyDataset` instances
`keras.utils.PyDataset` is a utility that you can subclass to obtain
a Python generator with two important properties:
- It works well with multiprocessing.
- It can be shuffled (e.g. when passing `shuffle=True` in `fit()`).
A `PyDataset` must implement two methods:
- `__getitem__`
- `__len__`
The method `__getitem__` should return a complete batch.
If you want to modify your dataset between epochs, you may implement `on_epoch_end`.
Here's a quick example:
```python
class ExamplePyDataset(keras.utils.PyDataset):
def __init__(self, x, y, batch_size, **kwargs):
super().__init__(**kwargs)
self.x = x
self.y = y
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size : (idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size : (idx + 1) * self.batch_size]
return batch_x, batch_y
train_py_dataset = ExamplePyDataset(x_train, y_train, batch_size=32)
val_py_dataset = ExamplePyDataset(x_val, y_val, batch_size=32)
```
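As noted above, you may implement `on_epoch_end()` to modify your dataset between epochs.
Here's a minimal sketch (not part of the example above) that reshuffles the sample order at
the end of every epoch; the index-based shuffling scheme is just one possible approach:

```python
class ShufflingPyDataset(keras.utils.PyDataset):
    def __init__(self, x, y, batch_size, **kwargs):
        super().__init__(**kwargs)
        self.x = x
        self.y = y
        self.batch_size = batch_size
        self.indices = np.arange(len(x))

    def __len__(self):
        return int(np.ceil(len(self.x) / float(self.batch_size)))

    def __getitem__(self, idx):
        batch_indices = self.indices[
            idx * self.batch_size : (idx + 1) * self.batch_size
        ]
        return self.x[batch_indices], self.y[batch_indices]

    def on_epoch_end(self):
        # Reshuffle the sample order so that batches differ between epochs.
        np.random.shuffle(self.indices)
```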
To fit the model, pass the dataset as the `x` argument (there's no need for a `y`
argument, since the dataset includes the targets), and pass the validation dataset
as the `validation_data` argument. There's also no need for a `batch_size` argument,
since the dataset is already batched!
```python
model = get_compiled_model()
model.fit(train_py_dataset, batch_size=64, validation_data=val_py_dataset, epochs=1)
```
<div class="k-default-codeblock">
```
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 1s 443us/step - loss: 0.5217 - sparse_categorical_accuracy: 0.8473 - val_loss: 0.1576 - val_sparse_categorical_accuracy: 0.9525
<keras.src.callbacks.history.History at 0x2f9c8d120>
```
</div>
Evaluating the model is just as easy:
```python
model.evaluate(val_py_dataset)
```
<div class="k-default-codeblock">
```
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 157us/step - loss: 0.1821 - sparse_categorical_accuracy: 0.9450
[0.15764616429805756, 0.9524999856948853]
```
</div>
Importantly, `PyDataset` objects support three common constructor arguments
that handle the parallel processing configuration:
- `workers`: Number of workers to use in multithreading or
multiprocessing. Typically, you'd set it to the number of
cores on your CPU.
- `use_multiprocessing`: Whether to use Python multiprocessing for
parallelism. Setting this to `True` means that your
dataset will be replicated in multiple forked processes.
This is necessary to gain compute-level (rather than I/O level)
benefits from parallelism. However it can only be set to
`True` if your dataset can be safely pickled.
- `max_queue_size`: Maximum number of batches to keep in the queue
when iterating over the dataset in a multithreaded or
multiprocessed setting.
You can reduce this value to reduce the CPU memory consumption of
your dataset. It defaults to 10.
By default, multiprocessing is disabled (`use_multiprocessing=False`) and only
one thread is used. You should make sure to only turn on `use_multiprocessing` if
your code is running inside a Python `if __name__ == "__main__":` block in order
to avoid issues.
Here's a 4-thread, non-multiprocessed example:
```python
train_py_dataset = ExamplePyDataset(x_train, y_train, batch_size=32, workers=4)
val_py_dataset = ExamplePyDataset(x_val, y_val, batch_size=32, workers=4)
model = get_compiled_model()
model.fit(train_py_dataset, batch_size=64, validation_data=val_py_dataset, epochs=1)
```
<div class="k-default-codeblock">
```
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 1s 561us/step - loss: 0.5146 - sparse_categorical_accuracy: 0.8516 - val_loss: 0.1623 - val_sparse_categorical_accuracy: 0.9514
<keras.src.callbacks.history.History at 0x2e7fd5ea0>
```
</div>
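If your dataset can be safely pickled and the per-batch work is CPU-bound, you might opt into
multiprocessing instead. The sketch below is only illustrative -- the worker count and queue
size are arbitrary values, not recommendations:

```python
if __name__ == "__main__":
    train_py_dataset = ExamplePyDataset(
        x_train,
        y_train,
        batch_size=32,
        workers=4,  # Number of worker processes.
        use_multiprocessing=True,  # Requires a picklable dataset.
        max_queue_size=8,  # Cap on the number of prefetched batches.
    )
    model = get_compiled_model()
    model.fit(train_py_dataset, epochs=1)
```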
---
## Training & evaluation using PyTorch `DataLoader` objects
All built-in training and evaluation APIs are also compatible with `torch.utils.data.Dataset` and
`torch.utils.data.DataLoader` objects -- regardless of whether you're using the PyTorch backend,
or the JAX or TensorFlow backends. Let's take a look at a simple example.
Unlike `PyDataset`, which is batch-centric, PyTorch `Dataset` objects are sample-centric:
the `__len__` method returns the number of samples,
and the `__getitem__` method returns a specific sample.
```python
class ExampleTorchDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
train_torch_dataset = ExampleTorchDataset(x_train, y_train)
val_torch_dataset = ExampleTorchDataset(x_val, y_val)
```
To use a PyTorch Dataset, you need to wrap it in a `DataLoader`, which takes care
of batching and shuffling:
```python
train_dataloader = torch.utils.data.DataLoader(
train_torch_dataset, batch_size=32, shuffle=True
)
val_dataloader = torch.utils.data.DataLoader(
val_torch_dataset, batch_size=32, shuffle=True
)
```
Now you can use them in the Keras API just like any other iterator:
```python
model = get_compiled_model()
model.fit(train_dataloader, batch_size=64, validation_data=val_dataloader, epochs=1)
model.evaluate(val_dataloader)
```
<div class="k-default-codeblock">
```
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 1s 575us/step - loss: 0.5051 - sparse_categorical_accuracy: 0.8568 - val_loss: 0.1613 - val_sparse_categorical_accuracy: 0.9528
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 278us/step - loss: 0.1551 - sparse_categorical_accuracy: 0.9541
[0.16209803521633148, 0.9527999758720398]
```
</div>
---
## Using sample weighting and class weighting
With the default settings, the weight of a sample is decided by its frequency
in the dataset. There are two methods to weight the data, independent of
sample frequency:
* Class weights
* Sample weights
### Class weights
This is set by passing a dictionary to the `class_weight` argument to
`Model.fit()`. This dictionary maps class indices to the weight that should
be used for samples belonging to this class.
This can be used to balance classes without resampling, or to train a
model that gives more importance to a particular class.
For instance, if class "0" is half as represented as class "1" in your data,
you could use `Model.fit(..., class_weight={0: 1., 1: 0.5})`.
Here's a NumPy example where we use class weights or sample weights to
give more importance to the correct classification of class #5 (which
is the digit "5" in the MNIST dataset).
```python
class_weight = {
0: 1.0,
1: 1.0,
2: 1.0,
3: 1.0,
4: 1.0,
# Set weight "2" for class "5",
# making this class 2x more important
5: 2.0,
6: 1.0,
7: 1.0,
8: 1.0,
9: 1.0,
}
print("Fit with class weight")
model = get_compiled_model()
model.fit(x_train, y_train, class_weight=class_weight, batch_size=64, epochs=1)
```
<div class="k-default-codeblock">
```
Fit with class weight
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 534us/step - loss: 0.6205 - sparse_categorical_accuracy: 0.8375
<keras.src.callbacks.history.History at 0x298d44eb0>
```
</div>
### Sample weights
For fine-grained control, or if you are not building a classifier,
you can use "sample weights".
- When training from NumPy data: Pass the `sample_weight`
argument to `Model.fit()`.
- When training from `tf.data` or any other sort of iterator:
Yield `(input_batch, label_batch, sample_weight_batch)` tuples.
A "sample weights" array is an array of numbers that specify how much weight
each sample in a batch should have in computing the total loss. It is commonly
used in imbalanced classification problems (the idea being to give more weight
to rarely-seen classes).
When the weights used are ones and zeros, the array can be used as a *mask* for
the loss function (entirely discarding the contribution of certain samples to
the total loss).
```python
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
print("Fit with sample weight")
model = get_compiled_model()
model.fit(x_train, y_train, sample_weight=sample_weight, batch_size=64, epochs=1)
```
<div class="k-default-codeblock">
```
Fit with sample weight
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 546us/step - loss: 0.6397 - sparse_categorical_accuracy: 0.8388
<keras.src.callbacks.history.History at 0x298e066e0>
```
</div>
Here's a matching `Dataset` example:
```python
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
# Create a Dataset that includes sample weights
# (3rd element in the return tuple).
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train, sample_weight))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model = get_compiled_model()
model.fit(train_dataset, epochs=1)
```
<div class="k-default-codeblock">
```
782/782 ━━━━━━━━━━━━━━━━━━━━ 1s 651us/step - loss: 0.5971 - sparse_categorical_accuracy: 0.8445
<keras.src.callbacks.history.History at 0x312854100>
```
</div>
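As an aside, here's a minimal sketch of the masking idea mentioned above: a sample weight
array of ones and zeros that simply excludes the samples of class "5" from the loss. This
is purely illustrative -- dropping a class makes no sense for MNIST in practice:

```python
# Zero weight for class "5", full weight for everything else.
mask_weight = np.ones(shape=(len(y_train),))
mask_weight[y_train == 5] = 0.0

model = get_compiled_model()
model.fit(x_train, y_train, sample_weight=mask_weight, batch_size=64, epochs=1)
```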
---
## Passing data to multi-input, multi-output models
In the previous examples, we were considering a model with a single input (a tensor of
shape `(784,)`) and a single output (a prediction tensor of shape `(10,)`). But what
about models that have multiple inputs or outputs?
Consider the following model, which has an image input of shape `(32, 32, 3)` (that's
`(height, width, channels)`) and a time series input of shape `(None, 10)` (that's
`(timesteps, features)`). Our model will have two outputs computed from the
combination of these inputs: a "score" (of shape `(1,)`) and a probability
distribution over five classes (of shape `(5,)`).
```python
image_input = keras.Input(shape=(32, 32, 3), name="img_input")
timeseries_input = keras.Input(shape=(None, 10), name="ts_input")
x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)
x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)
x = layers.concatenate([x1, x2])
score_output = layers.Dense(1, name="score_output")(x)
class_output = layers.Dense(5, name="class_output")(x)
model = keras.Model(
inputs=[image_input, timeseries_input], outputs=[score_output, class_output]
)
```
Let's plot this model, so you can clearly see what we're doing here (note that the
shapes shown in the plot are batch shapes, rather than per-sample shapes).
```python
keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)
```

At compilation time, we can assign different losses to different outputs by passing
the loss functions as a list:
```python
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy(),
],
)
```
If we only passed a single loss function to the model, the same loss function would be
applied to every output (which is not appropriate here).
Likewise for metrics:
```python
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy(),
],
metrics=[
[
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
[keras.metrics.CategoricalAccuracy()],
],
)
```
Since we gave names to our output layers, we could also specify per-output losses and
metrics via a dict:
```python
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"score_output": keras.losses.MeanSquaredError(),
"class_output": keras.losses.CategoricalCrossentropy(),
},
metrics={
"score_output": [
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
"class_output": [keras.metrics.CategoricalAccuracy()],
},
)
```
We recommend the use of explicit names and dicts if you have more than 2 outputs.
It's possible to give different weights to different output-specific losses (for
instance, one might wish to privilege the "score" loss in our example, by giving it 2x
the importance of the class loss), using the `loss_weights` argument:
```python
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"score_output": keras.losses.MeanSquaredError(),
"class_output": keras.losses.CategoricalCrossentropy(),
},
metrics={
"score_output": [
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
"class_output": [keras.metrics.CategoricalAccuracy()],
},
loss_weights={"score_output": 2.0, "class_output": 1.0},
)
```
You could also choose not to compute a loss for certain outputs, if these outputs are
meant for prediction but not for training:
```python
# List loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[None, keras.losses.CategoricalCrossentropy()],
)
# Or dict loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={"class_output": keras.losses.CategoricalCrossentropy()},
)
```
Passing data to a multi-input or multi-output model in `fit()` works in a similar way as
specifying a loss function in compile: you can pass **lists of NumPy arrays** (with
1:1 mapping to the outputs that received a loss function) or **dicts mapping output
names to NumPy arrays**.
```python
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy(),
],
)
# Generate dummy NumPy data
img_data = np.random.random_sample(size=(100, 32, 32, 3))
ts_data = np.random.random_sample(size=(100, 20, 10))
score_targets = np.random.random_sample(size=(100, 1))
class_targets = np.random.random_sample(size=(100, 5))
# Fit on lists
model.fit([img_data, ts_data], [score_targets, class_targets], batch_size=32, epochs=1)
# Alternatively, fit on dicts
model.fit(
{"img_input": img_data, "ts_input": ts_data},
{"score_output": score_targets, "class_output": class_targets},
batch_size=32,
epochs=1,
)
```
<div class="k-default-codeblock">
```
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 62ms/step - loss: 18.0146
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step - loss: 17.6494
<keras.src.callbacks.history.History at 0x31a6c5810>
```
</div>
Here's the `Dataset` use case: similarly to what we did for NumPy arrays, the `Dataset`
should return a tuple of dicts.
```python
train_dataset = tf.data.Dataset.from_tensor_slices(
(
{"img_input": img_data, "ts_input": ts_data},
{"score_output": score_targets, "class_output": class_targets},
)
)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model.fit(train_dataset, epochs=1)
```
<div class="k-default-codeblock">
```
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 197ms/step - loss: 17.8578
<keras.src.callbacks.history.History at 0x17c7e5690>
```
</div>
---
## Using callbacks
Callbacks in Keras are objects that are called at different points during training (at
the start of an epoch, at the end of a batch, at the end of an epoch, etc.). They
can be used to implement certain behaviors, such as:
- Doing validation at different points during training (beyond the built-in per-epoch
validation)
- Checkpointing the model at regular intervals or when it exceeds a certain accuracy
threshold
- Changing the learning rate of the model when training seems to be plateauing
- Doing fine-tuning of the top layers when training seems to be plateauing
- Sending email or instant message notifications when training ends or when a certain
performance threshold is exceeded
- Etc.
Callbacks can be passed as a list to your call to `fit()`:
```python
model = get_compiled_model()
callbacks = [
keras.callbacks.EarlyStopping(
# Stop training when `val_loss` is no longer improving
monitor="val_loss",
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta=1e-2,
# "no longer improving" being further defined as "for at least 2 epochs"
patience=2,
verbose=1,
)
]
model.fit(
x_train,
y_train,
epochs=20,
batch_size=64,
callbacks=callbacks,
validation_split=0.2,
)
```
<div class="k-default-codeblock">
```
Epoch 1/20
625/625 ━━━━━━━━━━━━━━━━━━━━ 1s 622us/step - loss: 0.6245 - sparse_categorical_accuracy: 0.8275 - val_loss: 0.2231 - val_sparse_categorical_accuracy: 0.9330
Epoch 2/20
625/625 ━━━━━━━━━━━━━━━━━━━━ 0s 404us/step - loss: 0.1809 - sparse_categorical_accuracy: 0.9460 - val_loss: 0.1727 - val_sparse_categorical_accuracy: 0.9476
Epoch 3/20
625/625 ━━━━━━━━━━━━━━━━━━━━ 0s 398us/step - loss: 0.1336 - sparse_categorical_accuracy: 0.9598 - val_loss: 0.1564 - val_sparse_categorical_accuracy: 0.9545
Epoch 4/20
625/625 ━━━━━━━━━━━━━━━━━━━━ 0s 400us/step - loss: 0.1012 - sparse_categorical_accuracy: 0.9699 - val_loss: 0.1502 - val_sparse_categorical_accuracy: 0.9570
Epoch 5/20
625/625 ━━━━━━━━━━━━━━━━━━━━ 0s 403us/step - loss: 0.0835 - sparse_categorical_accuracy: 0.9748 - val_loss: 0.1436 - val_sparse_categorical_accuracy: 0.9589
Epoch 6/20
625/625 ━━━━━━━━━━━━━━━━━━━━ 0s 396us/step - loss: 0.0699 - sparse_categorical_accuracy: 0.9783 - val_loss: 0.1484 - val_sparse_categorical_accuracy: 0.9577
Epoch 7/20
625/625 ━━━━━━━━━━━━━━━━━━━━ 0s 402us/step - loss: 0.0603 - sparse_categorical_accuracy: 0.9814 - val_loss: 0.1406 - val_sparse_categorical_accuracy: 0.9629
Epoch 7: early stopping
<keras.src.callbacks.history.History at 0x31ae37c10>
```
</div>
### Many built-in callbacks are available
There are many built-in callbacks already available in Keras, such as:
- `ModelCheckpoint`: Periodically save the model.
- `EarlyStopping`: Stop training when training is no longer improving the validation
metrics.
- `TensorBoard`: periodically write model logs that can be visualized in
[TensorBoard](https://www.tensorflow.org/tensorboard) (more details in the section
"Visualization").
- `CSVLogger`: streams loss and metrics data to a CSV file.
- etc.
See the [callbacks documentation](/api/callbacks/) for the complete list.
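For instance, here's a minimal sketch that streams training metrics to a CSV file with
`CSVLogger` (the file path is an arbitrary placeholder):

```python
model = get_compiled_model()
csv_logger = keras.callbacks.CSVLogger("training_log.csv", append=False)
model.fit(
    x_train,
    y_train,
    epochs=2,
    batch_size=64,
    callbacks=[csv_logger],
    validation_split=0.2,
)
```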
### Writing your own callback
You can create a custom callback by extending the base class
`keras.callbacks.Callback`. A callback has access to its associated model through the
class property `self.model`.
Make sure to read the
[complete guide to writing custom callbacks](/guides/writing_your_own_callbacks/).
Here's a simple example saving a list of per-batch loss values during training:
```python
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs):
self.per_batch_losses = []
def on_batch_end(self, batch, logs):
self.per_batch_losses.append(logs.get("loss"))
```
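Here's how you might use it -- a short sketch; the variable names are just for illustration:

```python
model = get_compiled_model()
loss_history = LossHistory()
model.fit(x_train, y_train, batch_size=64, epochs=1, callbacks=[loss_history])

# The recorded values are available on the callback instance after training.
print(len(loss_history.per_batch_losses), "batch losses recorded")
```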
---
## Checkpointing models
When you're training a model on relatively large datasets, it's crucial to save
checkpoints of your model at frequent intervals.
The easiest way to achieve this is with the `ModelCheckpoint` callback:
```python
model = get_compiled_model()
callbacks = [
keras.callbacks.ModelCheckpoint(
# Path where to save the model
# The two parameters below mean that we will overwrite
# the current checkpoint if and only if
# the `val_loss` score has improved.
# The saved model name will include the current epoch.
filepath="mymodel_{epoch}.keras",
save_best_only=True, # Only save a model if `val_loss` has improved.
monitor="val_loss",
verbose=1,
)
]
model.fit(
x_train,
y_train,
epochs=2,
batch_size=64,
callbacks=callbacks,
validation_split=0.2,
)
```
<div class="k-default-codeblock">
```
Epoch 1/2
 559/625 ━━━━━━━━━━━━━━━━━━━━ 0s 360us/step - loss: 0.6490 - sparse_categorical_accuracy: 0.8209
Epoch 1: val_loss improved from inf to 0.22393, saving model to mymodel_1.keras
625/625 ━━━━━━━━━━━━━━━━━━━━ 1s 577us/step - loss: 0.6194 - sparse_categorical_accuracy: 0.8289 - val_loss: 0.2239 - val_sparse_categorical_accuracy: 0.9340
Epoch 2/2
 565/625 ━━━━━━━━━━━━━━━━━━━━ 0s 355us/step - loss: 0.1816 - sparse_categorical_accuracy: 0.9476
Epoch 2: val_loss improved from 0.22393 to 0.16868, saving model to mymodel_2.keras
625/625 ━━━━━━━━━━━━━━━━━━━━ 0s 411us/step - loss: 0.1806 - sparse_categorical_accuracy: 0.9479 - val_loss: 0.1687 - val_sparse_categorical_accuracy: 0.9494
<keras.src.callbacks.history.History at 0x2e5cb7250>
```
</div>
The `ModelCheckpoint` callback can be used to implement fault-tolerance:
the ability to restart training from the last saved state of the model in case training
gets randomly interrupted. Here's a basic example:
```python
# Prepare a directory to store all the checkpoints.
checkpoint_dir = "./ckpt"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def make_or_restore_model():
# Either restore the latest model, or create a fresh one
# if there is no checkpoint available.
checkpoints = [checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)]
if checkpoints:
latest_checkpoint = max(checkpoints, key=os.path.getctime)
print("Restoring from", latest_checkpoint)
return keras.models.load_model(latest_checkpoint)
print("Creating a new model")
return get_compiled_model()
model = make_or_restore_model()
callbacks = [
# This callback saves the model every 100 batches.
# We include the training loss in the saved model name.
keras.callbacks.ModelCheckpoint(
filepath=checkpoint_dir + "/model-loss={loss:.2f}.keras", save_freq=100
)
]
model.fit(x_train, y_train, epochs=1, callbacks=callbacks)
```
<div class="k-default-codeblock">
```
Creating a new model
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 1s 390us/step - loss: 0.4910 - sparse_categorical_accuracy: 0.8623
<keras.src.callbacks.history.History at 0x2e5c454e0>
```
</div>
You can also write your own callback for saving and restoring models.
For a complete guide on serialization and saving, see the
[guide to saving and serializing Models](/guides/serialization_and_saving/).
---
## Using learning rate schedules
A common pattern when training deep learning models is to gradually reduce the learning
rate as training progresses. This is generally known as "learning rate decay".
The learning rate decay schedule could be static (fixed in advance, as a function of the
current epoch or the current batch index), or dynamic (responding to the current
behavior of the model, in particular the validation loss).
### Passing a schedule to an optimizer
You can easily use a static learning rate decay schedule by passing a schedule object
as the `learning_rate` argument in your optimizer:
```python
initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)
```
Several built-in schedules are available: `ExponentialDecay`, `PiecewiseConstantDecay`,
`PolynomialDecay`, and `InverseTimeDecay`.
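For instance, here's a sketch of a piecewise constant schedule -- the boundaries and values
below are arbitrary examples, not recommendations:

```python
lr_schedule = keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[100000, 110000],
    values=[1.0, 0.5, 0.1],
)
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)
```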
### Using callbacks to implement a dynamic learning rate schedule
A dynamic learning rate schedule (for instance, decreasing the learning rate when the
validation loss is no longer improving) cannot be achieved with these schedule objects,
since the optimizer does not have access to validation metrics.
However, callbacks do have access to all metrics, including validation metrics! You can
thus achieve this pattern by using a callback that modifies the current learning rate
on the optimizer. In fact, this is even built-in as the `ReduceLROnPlateau` callback.
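For example, here's a sketch using `ReduceLROnPlateau` -- the factor, patience, and minimum
learning rate are arbitrary illustrative values:

```python
model = get_compiled_model()
callbacks = [
    keras.callbacks.ReduceLROnPlateau(
        monitor="val_loss",
        factor=0.5,  # Halve the learning rate...
        patience=2,  # ...if `val_loss` hasn't improved for 2 epochs.
        min_lr=1e-5,
    )
]
model.fit(
    x_train,
    y_train,
    epochs=5,
    batch_size=64,
    callbacks=callbacks,
    validation_split=0.2,
)
```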
---
## Visualizing loss and metrics during training with TensorBoard
The best way to keep an eye on your model during training is to use
[TensorBoard](https://www.tensorflow.org/tensorboard) -- a browser-based application
that you can run locally, and that provides you with:
- Live plots of the loss and metrics for training and evaluation
- (optionally) Visualizations of the histograms of your layer activations
- (optionally) 3D visualizations of the embedding spaces learned by your `Embedding`
layers
If you have installed TensorFlow with pip, you should be able to launch TensorBoard
from the command line:
```
tensorboard --logdir=/full_path_to_your_logs
```
### Using the TensorBoard callback
The easiest way to use TensorBoard with a Keras model and the `fit()` method is the
`TensorBoard` callback.
In the simplest case, just specify where you want the callback to write logs, and
you're good to go:
```python
keras.callbacks.TensorBoard(
log_dir="/full_path_to_your_logs",
histogram_freq=0, # How often to log histogram visualizations
embeddings_freq=0, # How often to log embedding visualizations
    update_freq="epoch",  # How often to write logs (default: once per epoch)
)
```
<div class="k-default-codeblock">
```
<keras.src.callbacks.tensorboard.TensorBoard at 0x31b0188b0>
```
</div>
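To actually use it, pass the callback instance to `fit()`. Here's a minimal sketch (the log
directory is a placeholder):

```python
tensorboard_cb = keras.callbacks.TensorBoard(log_dir="/full_path_to_your_logs")
model = get_compiled_model()
model.fit(
    x_train,
    y_train,
    epochs=2,
    batch_size=64,
    callbacks=[tensorboard_cb],
    validation_split=0.2,
)
```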
For more information, see the
[documentation for the `TensorBoard` callback](https://keras.io/api/callbacks/tensorboard/).
| keras-io/guides/md/training_with_built_in_methods.md/0 | {
"file_path": "keras-io/guides/md/training_with_built_in_methods.md",
"repo_id": "keras-io",
"token_count": 17989
} | 122 |
"""
Title: Writing a training loop from scratch in PyTorch
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2023/06/25
Last modified: 2023/06/25
Description: Writing low-level training & evaluation loops in PyTorch.
Accelerator: None
"""
"""
## Setup
"""
import os
# This guide can only be run with the torch backend.
os.environ["KERAS_BACKEND"] = "torch"
import torch
import keras
import numpy as np
"""
## Introduction
Keras provides default training and evaluation loops, `fit()` and `evaluate()`.
Their usage is covered in the guide
[Training & evaluation with the built-in methods](/guides/training_with_built_in_methods/).
If you want to customize the learning algorithm of your model while still leveraging
the convenience of `fit()`
(for instance, to train a GAN using `fit()`), you can subclass the `Model` class and
implement your own `train_step()` method, which
is called repeatedly during `fit()`.
Now, if you want very low-level control over training & evaluation, you should write
your own training & evaluation loops from scratch. This is what this guide is about.
"""
"""
## A first end-to-end example
To write a custom training loop, we need the following ingredients:
- A model to train, of course.
- An optimizer. You could either use a `keras.optimizers` optimizer,
or a native PyTorch optimizer from `torch.optim`.
- A loss function. You could either use a `keras.losses` loss,
or a native PyTorch loss from `torch.nn`.
- A dataset. You could use any format: a `tf.data.Dataset`,
a PyTorch `DataLoader`, a Python generator, etc.
Let's line them up. We'll use torch-native objects in each case --
except, of course, for the Keras model.
First, let's get the model and the MNIST dataset:
"""
# Let's consider a simple MNIST model
def get_model():
inputs = keras.Input(shape=(784,), name="digits")
x1 = keras.layers.Dense(64, activation="relu")(inputs)
x2 = keras.layers.Dense(64, activation="relu")(x1)
outputs = keras.layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
# Load the MNIST dataset and put it in a torch DataLoader
# Prepare the training dataset.
batch_size = 32
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784)).astype("float32")
x_test = np.reshape(x_test, (-1, 784)).astype("float32")
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Create torch Datasets
train_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
val_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_val), torch.from_numpy(y_val)
)
# Create DataLoaders for the Datasets
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True
)
val_dataloader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size, shuffle=False
)
"""
Next, here's our PyTorch optimizer and our PyTorch loss function:
"""
# Instantiate a torch optimizer
model = get_model()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Instantiate a torch loss function
loss_fn = torch.nn.CrossEntropyLoss()
"""
Let's train our model using mini-batch gradient descent with a custom training loop.
Calling `loss.backward()` on a loss tensor triggers backpropagation.
Once that's done, your optimizer is magically aware of the gradients for each variable
and can update its variables, which is done via `optimizer.step()`.
Tensors, variables, optimizers are all interconnected to one another via hidden global state.
Also, don't forget to call `model.zero_grad()` before `loss.backward()`, or you won't
get the right gradients for your variables.
Here's our training loop, step by step:
- We open a `for` loop that iterates over epochs
- For each epoch, we open a `for` loop that iterates over the dataset, in batches
- For each batch, we call the model on the input data to retrieve the predictions,
then we use them to compute a loss value
- We call `loss.backward()` to compute the gradients of the weights
of the model with regard to the loss
- Finally, we use the optimizer to update the weights of the model based on the
gradients
"""
epochs = 3
for epoch in range(epochs):
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(logits, targets)
# Backward pass
model.zero_grad()
loss.backward()
# Optimizer variable updates
optimizer.step()
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
"""
As an alternative, let's look at what the loop looks like when using a Keras optimizer
and a Keras loss function.
Important differences:
- You retrieve the gradients for the variables via `v.value.grad`,
called on each trainable variable.
- You update your variables via `optimizer.apply()`, which must be
called in a `torch.no_grad()` scope.
**Also, a big gotcha:** while all NumPy/TensorFlow/JAX/Keras APIs
as well as Python `unittest` APIs use the argument order convention
`fn(y_true, y_pred)` (reference values first, predicted values second),
PyTorch actually uses `fn(y_pred, y_true)` for its losses.
So make sure to invert the order of `logits` and `targets`.
"""
model = get_model()
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(targets, logits)
# Backward pass
model.zero_grad()
trainable_weights = [v for v in model.trainable_weights]
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
optimizer.apply(gradients, trainable_weights)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
"""
## Low-level handling of metrics
Let's add metrics monitoring to this basic training loop.
You can readily reuse built-in Keras metrics (or custom ones you wrote) in such training
loops written from scratch. Here's the flow:
- Instantiate the metric at the start of the loop
- Call `metric.update_state()` after each batch
- Call `metric.result()` when you need to display the current value of the metric
- Call `metric.reset_state()` when you need to clear the state of the metric
(typically at the end of an epoch)
Let's use this knowledge to compute `CategoricalAccuracy` on training and
validation data at the end of each epoch:
"""
# Get a fresh model
model = get_model()
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.CategoricalAccuracy()
val_acc_metric = keras.metrics.CategoricalAccuracy()
"""
Here's our training & evaluation loop:
"""
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(targets, logits)
# Backward pass
model.zero_grad()
trainable_weights = [v for v in model.trainable_weights]
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
optimizer.apply(gradients, trainable_weights)
# Update training metric.
train_acc_metric.update_state(targets, logits)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print(f"Training acc over epoch: {float(train_acc):.4f}")
# Reset training metrics at the end of each epoch
train_acc_metric.reset_state()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataloader:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_state()
print(f"Validation acc: {float(val_acc):.4f}")
"""
## Low-level handling of losses tracked by the model
Layers & models recursively track any losses created during the forward pass
by layers that call `self.add_loss(value)`. The resulting list of scalar loss
values is available via the property `model.losses`
at the end of the forward pass.
If you want to use these loss components, you should sum them
and add them to the main loss in your training step.
Consider this layer, that creates an activity regularization loss:
"""
class ActivityRegularizationLayer(keras.layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * torch.sum(inputs))
return inputs
"""
Let's build a really simple model that uses it:
"""
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = keras.layers.Dense(64, activation="relu")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
"""
Here's what our training loop should look like now:
"""
# Get a fresh model
model = get_model()
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.CategoricalAccuracy()
val_acc_metric = keras.metrics.CategoricalAccuracy()
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(targets, logits)
if model.losses:
            loss = loss + sum(model.losses)
# Backward pass
model.zero_grad()
trainable_weights = [v for v in model.trainable_weights]
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
optimizer.apply(gradients, trainable_weights)
# Update training metric.
train_acc_metric.update_state(targets, logits)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print(f"Training acc over epoch: {float(train_acc):.4f}")
# Reset training metrics at the end of each epoch
train_acc_metric.reset_state()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataloader:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_state()
print(f"Validation acc: {float(val_acc):.4f}")
"""
That's it!
"""
| keras-io/guides/writing_a_custom_training_loop_in_torch.py/0 | {
"file_path": "keras-io/guides/writing_a_custom_training_loop_in_torch.py",
"repo_id": "keras-io",
"token_count": 4612
} | 123 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/data_loading/text/'" />
| keras-io/redirects/api/preprocessing/text/index.html/0 | {
"file_path": "keras-io/redirects/api/preprocessing/text/index.html",
"repo_id": "keras-io",
"token_count": 35
} | 124 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/guides/keras_cv/object_detection_keras_cv/'" />
| keras-io/redirects/guides/keras_cv/retina_net_overview/index.html/0 | {
"file_path": "keras-io/redirects/guides/keras_cv/retina_net_overview/index.html",
"repo_id": "keras-io",
"token_count": 45
} | 125 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/metrics/'" />
| keras-io/redirects/metrics/index.html/0 | {
"file_path": "keras-io/redirects/metrics/index.html",
"repo_id": "keras-io",
"token_count": 32
} | 126 |
"""Documentation generator for Keras.io
USAGE:
python autogen.py make
python autogen.py serve
"""
import shutil
import copy
import json
import re
import os
import sys
from pathlib import Path
import http.server
import socketserver
import signal
import docstrings
import jinja2
import multiprocessing
import autogen_utils
from master import MASTER
from examples_master import EXAMPLES_MASTER
import tutobooks
import generate_tf_guides
import render_tags
try:
import keras_nlp
except Exception as e:
print(f"Could not import Keras NLP. Exception: {e}")
keras_nlp = None
try:
import keras_cv
except Exception as e:
print(f"Could not import Keras CV. Exception: {e}")
keras_cv = None
EXAMPLES_GH_LOCATION = Path("keras-team") / "keras-io" / "blob" / "master" / "examples"
GUIDES_GH_LOCATION = Path("keras-team") / "keras-io" / "blob" / "master" / "guides"
KERAS_TEAM_GH = "https://github.com/keras-team"
PROJECT_URL = {
"keras": f"{KERAS_TEAM_GH}/keras/tree/v3.0.5/",
"keras_tuner": f"{KERAS_TEAM_GH}/keras-tuner/tree/v1.4.6/",
"keras_cv": f"{KERAS_TEAM_GH}/keras-cv/tree/v0.8.2/",
"keras_nlp": f"{KERAS_TEAM_GH}/keras-nlp/tree/v0.8.0/",
"tf_keras": f"{KERAS_TEAM_GH}/tf-keras/tree/v2.15.0/",
}
USE_MULTIPROCESSING = False
class KerasIO:
def __init__(
self,
master,
url,
templates_dir,
md_sources_dir,
site_dir,
theme_dir,
guides_dir,
examples_dir,
redirects_dir,
refresh_guides=False,
refresh_examples=False,
):
self.master = master
self.url = url
self.templates_dir = templates_dir
self.md_sources_dir = md_sources_dir
self.site_dir = site_dir
self.theme_dir = theme_dir
self.guides_dir = guides_dir
self.examples_dir = examples_dir
self.redirects_dir = redirects_dir
self.refresh_guides = refresh_guides
self.refresh_examples = refresh_examples
self.make_examples_master()
self.nav = self.make_nav_index()
self.docstring_printer = docstrings.KerasDocumentationGenerator(PROJECT_URL)
def make_examples_master(self):
for entry in self.master["children"]:
if entry["path"] == "examples/":
examples_entry = entry
break
for entry in examples_entry["children"]: # e.g. {"path": "nlp", ...}
children = entry.get("children", [])
preexisting = [e["path"] for e in children]
subdir = entry["path"] # e.g. nlp
path = Path(self.examples_dir) / subdir # e.g. examples/nlp
for fname in sorted(os.listdir(path)):
if fname.endswith(".py"): # e.g. examples/nlp/test.py
name = fname[:-3]
example_path = name.split("/")[-1]
if example_path not in preexisting:
f = open(path / fname, encoding="utf-8")
f.readline()
title_line = f.readline()
f.close()
assert title_line.startswith("Title: ")
title = title_line[len("Title: ") :]
children.append({"path": example_path, "title": title.strip()})
entry["children"] = children
def make_md_sources(self):
print("Generating md sources")
if os.path.exists(self.md_sources_dir):
print("Clearing", self.md_sources_dir)
shutil.rmtree(self.md_sources_dir)
os.makedirs(self.md_sources_dir)
self.make_tutobook_sources(
guides=self.refresh_guides, examples=self.refresh_examples
)
self.sync_tutobook_templates()
# Recursively generate all md sources based on the MASTER tree
self.make_md_source_for_entry(self.master, path_stack=[], title_stack=[])
def preprocess_tutobook_md_source(
self, md_content, fname, github_repo_dir, img_dir, site_img_dir
):
# Insert colab button and github button.
name = fname[:-3]
md_content_lines = md_content.split("\n")
button_lines = [
"\n",
'<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> '
"[**View in Colab**](https://colab.research.google.com/github/"
+ github_repo_dir
+ "/ipynb/"
+ name
+ ".ipynb"
+ ") "
'<span class="k-dot">•</span>'
'<img class="k-inline-icon" src="https://github.com/favicon.ico"/> '
"[**GitHub source**](https://github.com/"
+ github_repo_dir
+ "/"
+ fname
+ ")",
"\n",
]
md_content_lines = md_content_lines[:6] + button_lines + md_content_lines[6:]
md_content = "\n".join(md_content_lines)
# Normalize img urls
md_content = md_content.replace(
str(img_dir) + "/" + name, self.url + site_img_dir
)
# Insert --- before H2 titles
md_content = md_content.replace("\n## ", "\n---\n## ")
# Clean up progress bar output
if "[1m" in md_content:
md_content = md_content.replace("[1m", " ")
md_content = md_content.replace("[0m [32m", " ")
md_content = md_content.replace("[0m[37m[0m [1m", " ")
md_content = md_content.replace("[0m", "")
md_content = md_content.replace("[37m ", "")
return md_content
def make_tutobook_sources_for_directory(
self, src_dir, target_dir, img_dir, site_img_dir, github_repo_dir
):
# e.g.
# make_tutobook_sources_for_directory(
# "examples/nlp", "examples/nlp/md", "examples/nlp/img", "img/examples/nlp")
print("Making tutobook sources for", src_dir)
working_ipynb_dir = Path(src_dir) / "ipynb"
if not os.path.exists(working_ipynb_dir):
os.makedirs(working_ipynb_dir)
for fname in os.listdir(src_dir):
if fname.endswith(".py"):
print("...Processing", fname)
name = fname[:-3]
py_path = Path(src_dir) / fname
nb_path = working_ipynb_dir / (name + ".ipynb")
md_path = Path(target_dir) / (name + ".md")
tutobooks.py_to_md(py_path, nb_path, md_path, img_dir)
md_content = open(md_path).read()
md_content = self.preprocess_tutobook_md_source(
md_content, fname, github_repo_dir, img_dir, site_img_dir
)
open(md_path, "w").write(md_content)
shutil.rmtree(working_ipynb_dir)
def make_tutobook_ipynbs(self):
def process_one_dir(src_dir, target_dir):
if os.path.exists(target_dir):
print("Clearing", target_dir)
shutil.rmtree(target_dir)
os.makedirs(target_dir)
for fname in os.listdir(src_dir):
if fname.endswith(".py"):
print("...Processing", fname)
name = fname[:-3]
py_path = Path(src_dir) / fname
nb_path = target_dir / (name + ".ipynb")
tutobooks.py_to_nb(py_path, nb_path, fill_outputs=False)
# Guides
guides_dir = Path(self.guides_dir)
ipynb_dir = guides_dir / "ipynb"
process_one_dir(guides_dir, ipynb_dir)
# Examples
for name in os.listdir(self.examples_dir):
path = Path(self.examples_dir) / name
if os.path.isdir(path):
ipynb_dir = path / "ipynb"
process_one_dir(path, ipynb_dir)
def add_example(self, path, working_dir=None):
"""e.g. add_example('vision/cats_and_dogs')"""
# Prune out the ../ path
if path.startswith("../examples/"):
path = path.replace("../examples/", "")
folder, name = path.split(os.path.sep)
assert path.count(os.path.sep) == 1
if name.endswith(".py"):
name = name[:-3]
ipynb_dir = Path(self.examples_dir) / folder / "ipynb"
if not os.path.exists(ipynb_dir):
os.makedirs(ipynb_dir)
md_dir = Path(self.examples_dir) / folder / "md"
if not os.path.exists(md_dir):
os.makedirs(md_dir)
img_dir = Path(self.examples_dir) / folder / "img"
if not os.path.exists(img_dir):
os.makedirs(img_dir)
py_path = Path(self.examples_dir) / folder / (name + ".py")
md_path = md_dir / (name + ".md")
nb_path = ipynb_dir / (name + ".ipynb")
self.disable_warnings()
tutobooks.py_to_nb(py_path, nb_path, fill_outputs=False)
tutobooks.py_to_md(py_path, nb_path, md_path, img_dir, working_dir=working_dir)
md_content = open(md_path).read()
github_repo_dir = str(EXAMPLES_GH_LOCATION / folder)
site_img_dir = os.path.join("img", "examples", folder, name)
md_content = self.preprocess_tutobook_md_source(
md_content, name + ".py", github_repo_dir, img_dir, site_img_dir
)
open(md_path, "w").write(md_content)
def add_guide(self, name, working_dir=None):
"""e.g. add_guide('functional_api')"""
# Prune out the ../ path
if name.startswith("../guides/"):
name = name.replace("../guides/", "")
if name.endswith(".py"):
name = name[:-3]
ipynb_dir = Path(self.guides_dir) / "ipynb"
if not os.path.exists(ipynb_dir):
os.makedirs(ipynb_dir)
md_dir = Path(self.guides_dir) / "md"
if not os.path.exists(md_dir):
os.makedirs(md_dir)
img_dir = Path(self.guides_dir) / "img"
if not os.path.exists(img_dir):
os.makedirs(img_dir)
py_path = Path(self.guides_dir) / (name + ".py")
md_path = md_dir / (name + ".md")
nb_path = ipynb_dir / (name + ".ipynb")
self.disable_warnings()
tutobooks.py_to_nb(py_path, nb_path, fill_outputs=False)
tutobooks.py_to_md(py_path, nb_path, md_path, img_dir, working_dir=working_dir)
md_content = open(md_path).read()
md_content = md_content.replace("../guides/img/", "/img/guides/")
github_repo_dir = str(GUIDES_GH_LOCATION)
site_img_dir = "img/guides/" + name
md_content = self.preprocess_tutobook_md_source(
md_content, name + ".py", github_repo_dir, img_dir, site_img_dir
)
open(md_path, "w").write(md_content)
@staticmethod
def disable_warnings():
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
os.environ["AUTOGRAPH_VERBOSITY"] = "0"
def make_tutobook_sources(self, guides=True, examples=True):
"""Populate `examples/nlp/md`, `examples/nlp/img/`, etc.
- guides/md/ & /png/
- examples/nlp/md/ & /png/
- examples/computer_vision/md/ & /png/
- examples/structured_data/md/ & /png/
- examples/timeseries/md/ & /png/
- examples/generative_dl/md/ & /png/
- examples/keras_recipes/md/ & /png/
"""
# Guides
if guides:
target_dir = Path(self.guides_dir) / "md"
img_dir = Path(self.guides_dir) / "img"
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
if os.path.exists(img_dir):
shutil.rmtree(img_dir)
os.makedirs(target_dir)
os.makedirs(img_dir)
self.make_tutobook_sources_for_directory(
src_dir=Path(self.guides_dir),
target_dir=target_dir,
img_dir=img_dir,
site_img_dir="img/guides/",
github_repo_dir=str(GUIDES_GH_LOCATION),
)
# Examples
if examples:
for name in os.listdir(self.examples_dir):
path = Path(self.examples_dir) / name
if os.path.isdir(path):
target_dir = path / "md"
img_dir = path / "img"
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
if os.path.exists(img_dir):
shutil.rmtree(img_dir)
os.makedirs(target_dir)
os.makedirs(img_dir)
self.make_tutobook_sources_for_directory(
src_dir=path, # e.g. examples/nlp
target_dir=target_dir, # e.g. examples/nlp/md
img_dir=img_dir, # e.g. examples/nlp/img
site_img_dir="img/examples/" + name, # e.g. img/examples/nlp
github_repo_dir=str(EXAMPLES_GH_LOCATION / name),
)
def sync_tutobook_templates(self):
"""Copy generated `.md`s to source_dir.
Note: intro guides are copied to getting_started.
guides/md/ -> sources/guides/
guides/md/intro_* -> sources/getting_started/
examples/*/md/ -> sources/examples/*/
"""
# Guides
copy_inner_contents(
Path(self.guides_dir) / "md",
Path(self.templates_dir) / "guides",
ext=".md",
)
# Special cases
shutil.copyfile(
Path(self.templates_dir) / "guides" / "intro_to_keras_for_engineers.md",
Path(self.templates_dir)
/ "getting_started"
/ "intro_to_keras_for_engineers.md",
)
# Examples
for dir_name in os.listdir(Path(self.examples_dir)):
dir_path = Path(self.examples_dir) / dir_name # e.g. examples/nlp
if os.path.isdir(dir_path):
dst_dir = Path(self.templates_dir) / "examples" / dir_name
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
os.makedirs(dst_dir)
copy_inner_contents(dir_path / "md", dst_dir, ext=".md")
# Examples touch-up: add Keras version banner to each example
example_name_to_version = {}
for section in EXAMPLES_MASTER["children"]:
section_name = section["path"].replace("/", "")
for example in section["children"]:
example_name = section_name + "/" + example["path"]
if example.get("keras_3"):
version = 3
else:
version = 2
example_name_to_version[example_name] = version
for section_name in os.listdir(Path(self.templates_dir) / "examples"):
# e.g. templates/examples/nlp
dir_path = Path(self.templates_dir) / "examples" / section_name
if not os.path.isdir(dir_path):
continue
for example_fname in os.listdir(dir_path):
if example_fname.endswith(".md"):
md_path = dir_path / example_fname
with open(md_path) as f:
md_content = f.read()
example_name = (
section_name + "/" + example_fname.removesuffix(".md")
)
version = example_name_to_version.get(example_name, 2)
md_content_lines = md_content.split("\n")
for i, line in enumerate(md_content_lines):
if "View in Colab" in line:
md_content_lines.insert(
i,
f"<div class='example_version_banner keras_{version}'>ⓘ This example uses Keras {version}</div>",
)
break
md_content = "\n".join(md_content_lines) + "\n"
with open(md_path, "w") as f:
f.write(md_content)
def sync_tutobook_media(self):
"""Copy generated `.png`s to site_dir.
Note: intro guides are copied to getting_started.
guides/img/ -> site/img/guides/
examples/*/img/ -> site/img/examples/*/
"""
# Copy images for guide notebooks
for name in os.listdir(Path(self.guides_dir) / "img"):
path = Path(self.guides_dir) / "img" / name
if os.path.isdir(path):
shutil.copytree(path, Path(self.site_dir) / "img" / "guides" / name)
# Copy images for examples notebooks
for dir_name in os.listdir(Path(self.examples_dir)):
dir_path = Path(self.examples_dir) / dir_name
if os.path.isdir(dir_path):
if not os.path.exists(dir_path / "img"):
continue # No media was generated for this tutobook.
dst_dir = Path(self.site_dir) / "img" / "examples" / dir_name
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for name in os.listdir(dir_path / "img"):
path = dir_path / "img" / name
if os.path.isdir(path):
shutil.copytree(
path,
Path(self.site_dir) / "img" / "examples" / dir_name / name,
)
def make_nav_index(self):
max_depth = 4
path_stack = []
def make_nav_index_for_entry(entry, path_stack, max_depth):
if not isinstance(entry, dict):
raise ValueError("Incorrectly formatted entry: " f"{entry}")
path = entry["path"]
if path != "/":
path_stack.append(path)
url = self.url + str(Path(*path_stack)) + "/"
relative_url = "/" + str(Path(*path_stack)) + "/"
if len(path_stack) < max_depth:
children = [
make_nav_index_for_entry(child, path_stack[:], max_depth)
for child in entry.get("children", [])
]
else:
children = []
return {
"title": entry["title"],
"relative_url": relative_url,
"url": url,
"children": children,
}
return [
make_nav_index_for_entry(entry, path_stack[:], max_depth)
for entry in self.master["children"]
]
def make_md_source_for_entry(self, entry, path_stack, title_stack):
path = entry["path"]
if path != "/":
path_stack.append(path)
title_stack.append(entry["title"])
print("...Processing", Path(*path_stack))
parent_url = self.url + str(Path(*path_stack)) + "/"
if path.endswith("/"):
dir_path = Path(self.md_sources_dir) / Path(*path_stack)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
template_path = Path(self.templates_dir) / Path(*path_stack)
if path.endswith("/"):
template_path /= "index.md"
else:
template_path = template_path.with_suffix(".md")
if os.path.exists(template_path):
template_file = open(template_path, encoding="utf8")
template = template_file.read()
template_file.close()
else:
template = ""
if entry.get("toc"):
template += "{{toc}}\n\n"
if entry.get("generate"):
template += "{{autogenerated}}\n"
if not template.startswith("# "):
template = "# " + entry["title"] + "\n\n" + template
generate = entry.get("generate")
children = entry.get("children")
if generate:
generated_md = ""
for element in generate:
generated_md += self.docstring_printer.render(element)
if "{{autogenerated}}" not in template:
raise RuntimeError(
"Template found for %s but missing "
"{{autogenerated}} tag." % (template_path,)
)
template = template.replace("{{autogenerated}}", generated_md)
if entry.get("toc"):
if not children:
raise ValueError(
f"For template {template_path}, "
"a table of contents was requested but "
"the entry had no children."
)
toc = generate_md_toc(children, parent_url)
if "{{toc}}" not in template:
raise RuntimeError(
"Table of contents requested for %s but "
"missing {{toc}} tag." % (template_path,)
)
template = template.replace("{{toc}}", toc)
if "keras_nlp/" in path_stack and "models/" in path_stack:
template = render_tags.render_tags(template, keras_nlp)
if "keras_cv/" in path_stack and "models/" in path_stack:
template = render_tags.render_tags(template, keras_cv)
source_path = Path(self.md_sources_dir) / Path(*path_stack)
if path.endswith("/"):
md_source_path = source_path / "index.md"
metadata_path = source_path / "index_metadata.json"
else:
md_source_path = source_path.with_suffix(".md")
metadata_path = str(source_path) + "_metadata.json"
# Save md source file
autogen_utils.save_file(md_source_path, template)
# Save metadata file
location_history = []
for i in range(len(path_stack)):
stripped_path_stack = [s.strip("/") for s in path_stack[: i + 1]]
url = self.url + "/".join(stripped_path_stack) + "/"
location_history.append(
{
"url": url,
"title": title_stack[i],
}
)
metadata = json.dumps(
{
"location_history": location_history[:-1],
"outline": autogen_utils.make_outline(template)
if entry.get("outline", True)
else [],
"location": "/"
+ "/".join([s.replace("/", "") for s in path_stack])
+ "/",
"url": parent_url,
"title": entry["title"],
}
)
autogen_utils.save_file(metadata_path, metadata)
if children:
for entry in children:
self.make_md_source_for_entry(entry, path_stack[:], title_stack[:])
def make_map_of_symbol_names_to_api_urls(self):
def recursive_make_map(entry, current_url):
current_url /= entry["path"]
entry_map = {}
if "generate" in entry:
for symbol in entry["generate"]:
object_ = docstrings.import_object(symbol)
object_type = docstrings.get_type(object_)
object_name = symbol.split(".")[-1]
if symbol.startswith("tensorflow.keras."):
symbol = symbol.replace("tensorflow.keras.", "keras.")
object_name = object_name.lower().replace("_", "")
entry_map[symbol] = (
str(current_url) + "#" + object_name + "-" + object_type
)
if "children" in entry:
for child in entry["children"]:
entry_map.update(recursive_make_map(child, current_url))
return entry_map
self._map_of_symbol_names_to_api_urls = recursive_make_map(
self.master, Path("")
)
def generate_examples_landing_page(self):
"""Create the html file /examples/index.html.
- Load examples information and metadata
- Group them by category (e.g. CV) and subcategory (e.g. image classification)
- Render a card for each example
"""
examples_by_category = {}
category_names = []
category_paths = []
for child in self.master["children"]:
if child["path"] == "examples/":
examples_master = child
break
for category in examples_master["children"]:
category_name = category["title"]
category_names.append(category_name)
category_paths.append(category["path"])
examples_by_category[category_name] = category["children"]
categories_to_render = []
for category_name, category_path in zip(category_names, category_paths):
examples_by_subcategory = {}
subcategory_names = []
for example in examples_by_category[category_name]:
subcategory_name = example.get("subcategory", "Other")
if subcategory_name not in examples_by_subcategory:
examples_by_subcategory[subcategory_name] = []
subcategory_names.append(subcategory_name)
example["path"] = "/examples/" + category_path + example["path"]
examples_by_subcategory[subcategory_name].append(example)
subcategories_to_render = []
for subcategory_name in subcategory_names:
subcategories_to_render.append(
{
"title": subcategory_name,
"examples": examples_by_subcategory[subcategory_name],
}
)
category_dict = {
"title": category_name,
"path": "/examples/" + category_path,
}
if len(subcategories_to_render) > 1:
category_dict["subcategories"] = subcategories_to_render
else:
category_dict["examples"] = subcategories_to_render[0]["examples"]
categories_to_render.append(category_dict)
with open(Path(self.templates_dir) / "examples/index.md") as f:
md_content = f.read()
with open(Path(self.md_sources_dir) / "examples/index_metadata.json") as f:
metadata = json.loads(f.read())
examples_template = jinja2.Template(
open(Path(self.theme_dir) / "examples.html").read()
)
html_example_cards = examples_template.render(
{"categories": categories_to_render, "legend": True}
)
html_content = autogen_utils.render_markdown_to_html(md_content)
html_content = html_content.replace(
"<p>{{examples_list}}</p>", html_example_cards
)
html_content = insert_title_ids_in_html(html_content)
relative_url = "/examples/"
local_nav = [
autogen_utils.set_active_flag_in_nav_entry(entry, relative_url)
for entry in self.nav
]
self.render_single_docs_page_from_html(
target_path=Path(self.site_dir) / "examples/index.html",
title="Code examples",
html_content=html_content,
location_history=metadata["location_history"],
outline=metadata["outline"],
local_nav=local_nav,
relative_url=relative_url,
)
# Save per-category landing pages
for category_name, category_path in zip(category_names, category_paths):
with open(
Path(self.md_sources_dir)
/ "examples"
/ category_path
/ "index_metadata.json"
) as f:
metadata = json.loads(f.read())
relative_url = f"/examples/{category_path}"
local_nav = [
autogen_utils.set_active_flag_in_nav_entry(entry, relative_url)
for entry in self.nav
]
to_render = [
cat for cat in categories_to_render if cat["title"] == category_name
]
html_example_cards = examples_template.render(
{"categories": to_render, "legend": False}
)
self.render_single_docs_page_from_html(
target_path=Path(self.site_dir)
/ "examples"
/ category_path
/ "index.html",
title=category_name,
html_content=html_example_cards,
location_history=metadata["location_history"],
outline=metadata["outline"],
local_nav=local_nav,
relative_url=relative_url,
)
def render_md_sources_to_html(self):
self.make_map_of_symbol_names_to_api_urls()
print("Rendering md sources to HTML")
base_template = jinja2.Template(open(Path(self.theme_dir) / "base.html").read())
docs_template = jinja2.Template(open(Path(self.theme_dir) / "docs.html").read())
all_urls_list = []
if os.path.exists(self.site_dir):
print("Clearing", self.site_dir)
shutil.rmtree(self.site_dir)
if USE_MULTIPROCESSING:
for src_location, _, fnames in os.walk(self.md_sources_dir):
pool = multiprocessing.Pool(processes=8)
workers = [
pool.apply_async(
self.render_single_file,
args=(src_location, fname, self.nav),
)
for fname in fnames
]
for worker in workers:
url = worker.get()
if url is not None:
all_urls_list.append(url)
pool.close()
pool.join()
else:
for src_location, _, fnames in os.walk(self.md_sources_dir):
for fname in fnames:
print("...Rendering", fname)
self.render_single_file(src_location, fname, self.nav)
# Images & css
shutil.copytree(Path(self.theme_dir) / "css", Path(self.site_dir) / "css")
shutil.copytree(Path(self.theme_dir) / "img", Path(self.site_dir) / "img")
# Landing page
landing_template = jinja2.Template(
open(Path(self.theme_dir) / "landing.html").read()
)
landing_page = landing_template.render({"base_url": self.url})
autogen_utils.save_file(Path(self.site_dir) / "index.html", landing_page)
# Search page
search_main = open(Path(self.theme_dir) / "search.html").read()
search_page = base_template.render(
{
"title": "Search Keras documentation",
"nav": self.nav,
"base_url": self.url,
"main": search_main,
}
)
autogen_utils.save_file(Path(self.site_dir) / "search.html", search_page)
# 404 page
page404 = base_template.render(
{
"title": "Page not found",
"nav": self.nav,
"base_url": self.url,
"main": docs_template.render(
{
"title": "404",
"content": "<h1>404: Page not found</h1>",
"base_url": self.url,
}
),
}
)
autogen_utils.save_file(Path(self.site_dir) / "404.html", page404)
# Keras 3 announcement page
keras_3_template = jinja2.Template(
open(Path(self.theme_dir) / "keras_3.html").read()
)
md_content = open(
Path(self.templates_dir) / "keras_3" / "keras_3_announcement.md"
).read()
content = autogen_utils.render_markdown_to_html(md_content)
keras_core_page = keras_3_template.render(
{"base_url": self.url, "content": content}
)
autogen_utils.save_file(
Path(self.site_dir) / "keras_3" / "index.html",
keras_core_page,
)
# Favicon
shutil.copyfile(
Path(self.theme_dir) / "favicon.ico",
Path(self.site_dir) / "favicon.ico",
)
# Tutobooks
self.sync_tutobook_media()
sitemap = "\n".join(all_urls_list) + "\n"
autogen_utils.save_file(Path(self.site_dir) / "sitemap.txt", sitemap)
# Redirects
shutil.copytree(self.redirects_dir, self.site_dir, dirs_exist_ok=True)
# Examples landing page
self.generate_examples_landing_page()
def render_single_file(self, src_location, fname, nav):
if not fname.endswith(".md"):
return
src_dir = Path(src_location)
target_dir = src_location.replace(self.md_sources_dir, self.site_dir)
if not os.path.exists(target_dir):
try:
os.makedirs(target_dir)
except FileExistsError:
# Might be created by a concurrent process.
pass
# Load metadata for page
with open(str(Path(src_location) / fname[:-3]) + "_metadata.json") as f:
metadata = json.loads(f.read())
if fname == "index.md":
# Render as index.html
target_path = Path(target_dir) / "index.html"
relative_url = (str(target_dir) + "/").replace(self.site_dir, "/")
relative_url = relative_url.replace("//", "/")
else:
            # Render as fname_no_ext/index.html
fname_no_ext = ".".join(fname.split(".")[:-1])
full_target_dir = Path(target_dir) / fname_no_ext
os.makedirs(full_target_dir)
target_path = full_target_dir / "index.html"
relative_url = (str(full_target_dir) + "/").replace(self.site_dir, "/")
relative_url = relative_url.replace("//", "/")
if not relative_url.endswith("/"):
relative_url += "/"
md_file = open(src_dir / fname, encoding="utf-8")
md_content = md_file.read()
md_file.close()
md_content = replace_links(md_content)
# Convert Keras symbols to links to the Keras docs
for symbol, symbol_url in self._map_of_symbol_names_to_api_urls.items():
md_content = re.sub(
r"`((tf\.|)" + symbol + ")`",
r"[`\1`](" + symbol_url + ")",
md_content,
)
# Convert TF symbols to links to tensorflow.org
tmp_content = copy.copy(md_content)
replacements = {}
while "`tf." in tmp_content:
index = tmp_content.find("`tf.")
if tmp_content[index - 1] == "[":
tmp_content = tmp_content[tmp_content.find("`tf.") + 1 :]
tmp_content = tmp_content[tmp_content.find("`") + 1 :]
else:
tmp_content = tmp_content[tmp_content.find("`tf.") + 1 :]
symbol = tmp_content[: tmp_content.find("`")]
tmp_content = tmp_content[tmp_content.find("`") + 1 :]
if "/" not in symbol and "(" not in symbol:
# Check if we're looking at a method on a class
symbol_parts = symbol.split(".")
if len(symbol_parts) >= 3 and symbol_parts[-2][0].isupper():
# In this case the link should look like ".../class#method"
path = "/".join(symbol_parts[:-1]) + "#" + symbol_parts[-1]
else:
# Otherwise just ".../module/class_or_fn"
path = symbol.replace(".", "/")
path = path.replace("(", "")
path = path.replace(")", "")
replacements["`" + symbol + "`"] = (
"[`"
+ symbol
+ "`](https://www.tensorflow.org/api_docs/python/"
+ path
+ ")"
)
for key, value in replacements.items():
md_content = md_content.replace(key, value)
html_content = autogen_utils.render_markdown_to_html(md_content)
html_content = insert_title_ids_in_html(html_content)
local_nav = [
autogen_utils.set_active_flag_in_nav_entry(entry, relative_url)
for entry in nav
]
title = md_content[2 : md_content.find("\n")]
self.render_single_docs_page_from_html(
target_path,
title,
html_content,
metadata["location_history"],
metadata["outline"],
local_nav,
relative_url,
)
return relative_url
def render_single_docs_page_from_html(
self,
target_path,
title,
html_content,
location_history,
outline,
local_nav,
relative_url,
):
base_template = jinja2.Template(open(Path(self.theme_dir) / "base.html").read())
docs_template = jinja2.Template(open(Path(self.theme_dir) / "docs.html").read())
html_docs = docs_template.render(
{
"title": title,
"content": html_content,
"location_history": location_history,
"base_url": self.url,
"outline": outline,
}
)
html_page = base_template.render(
{
"title": title,
"nav": local_nav,
"base_url": self.url,
"main": html_docs,
"relative_url": relative_url,
}
)
html_page = html_page.replace("../guides/img/", "/img/guides/")
autogen_utils.save_file(target_path, html_page)
def make(self):
self.make_md_sources()
self.render_md_sources_to_html()
self.make_tutobook_ipynbs()
def serve(self):
os.chdir(self.site_dir)
socketserver.ThreadingTCPServer.allow_reuse_address = True
server = socketserver.ThreadingTCPServer(
("", 8000), http.server.SimpleHTTPRequestHandler
)
server.daemon_threads = True
def signal_handler(signal, frame):
try:
if server:
server.server_close()
finally:
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
try:
print("Serving on 0.0.0.0:8000")
server.serve_forever()
except KeyboardInterrupt:
pass
finally:
server.server_close()
def replace_links(content):
# Make sure all Keras guides point to keras.io.
for entry in generate_tf_guides.CONFIG:
keras_name = entry["source_name"]
tf_name = entry["target_name"]
content = content.replace(
"https://www.tensorflow.org/guide/keras/" + tf_name,
"https://keras.io/guides/" + keras_name,
)
return content
def strip_markdown_tags(md):
# Strip links
md = re.sub(r"\[(.*?)\]\(.*?\)", r"\1", md)
return md
def copy_inner_contents(src, dst, ext=".md"):
for fname in os.listdir(src):
fpath = Path(src) / fname
fdst = Path(dst) / fname
if fname.endswith(ext):
shutil.copyfile(fpath, fdst)
if os.path.isdir(fpath):
copy_inner_contents(fpath, fdst, ext)
def insert_title_ids_in_html(html):
marker = "replace_me_with_id_for:"
marker_end = ":end_of_title"
for i in range(1, 5):
match = "<h" + str(i) + ">(.*?)</h" + str(i) + ">"
replace = (
"<h"
+ str(i)
+ r' id="'
+ marker
+ r"\1"
+ marker_end
+ r'">\1</h'
+ str(i)
+ ">"
)
html = re.sub(match, replace, html)
while 1:
start = html.find(marker)
if start == -1:
break
title = html[start + len(marker) :]
title = title[: title.find(marker_end)]
normalized_title = title
normalized_title = normalized_title.replace("<code>", "")
normalized_title = normalized_title.replace("</code>", "")
if ">" in normalized_title:
normalized_title = normalized_title[normalized_title.find(">") + 1 :]
normalized_title = normalized_title[: normalized_title.find("</")]
normalized_title = autogen_utils.turn_title_into_id(normalized_title)
html = html.replace(marker + title + marker_end, normalized_title)
return html
def generate_md_toc(entries, url, depth=2):
assert url.endswith("/")
entries = [e for e in entries if not e.get("skip_from_toc")]
generated = ""
if set(len(x.get("generate", [])) for x in entries) == {1}:
print_generate = False
else:
print_generate = True
for entry in entries:
title = entry["title"]
path = entry["path"]
if not path.endswith("/"):
path += "/"
full_url = url + path
children = entry.get("children")
generate = entry.get("generate")
if children or (print_generate and generate):
title_prefix = "### "
else:
title_prefix = "- "
generated += title_prefix + "[{title}]({full_url})\n".format(
title=title, full_url=full_url
)
if children:
for child in children:
if child.get("skip_from_toc", False):
continue
child_title = child["title"]
child_path = child["path"]
child_url = full_url + child_path
generated += "- [{child_title}]({child_url})\n".format(
child_title=child_title, child_url=child_url
)
generated += "\n"
elif generate and print_generate:
for gen in generate:
obj = docstrings.import_object(gen)
obj_name = docstrings.get_name(obj)
obj_type = docstrings.get_type(obj)
link = "{full_url}#{obj_name}-{obj_type}".format(
full_url=full_url, obj_name=obj_name, obj_type=obj_type
).lower()
name = gen.split(".")[-1]
generated += "- [{name} {obj_type}]({link})\n".format(
name=name, obj_type=obj_type, link=link
)
generated += "\n"
return generated
def get_working_dir(arg):
if not arg.startswith("--working_dir="):
return None
return arg[len("--working_dir=") :]
if __name__ == "__main__":
root = Path(__file__).parent.parent.resolve()
keras_io = KerasIO(
master=MASTER,
url=os.path.sep,
templates_dir=os.path.join(root, "templates"),
md_sources_dir=os.path.join(root, "sources"),
site_dir=os.path.join(root, "site"),
theme_dir=os.path.join(root, "theme"),
guides_dir=os.path.join(root, "guides"),
examples_dir=os.path.join(root, "examples"),
redirects_dir=os.path.join(root, "redirects"),
refresh_guides=False,
refresh_examples=False,
)
cmd = sys.argv[1]
if cmd not in {
"make",
"serve",
"add_example",
"add_guide",
}:
raise ValueError(
"Must specify command `make`, `serve`, `add_example`, or `add_guide`."
)
if cmd in {"add_example", "add_guide"}:
        if len(sys.argv) not in (3, 4):
raise ValueError(
"Must specify example/guide to add, e.g. "
"`autogen.py add_example vision/cats_and_dogs`"
)
if cmd == "make":
keras_io.make_md_sources()
keras_io.render_md_sources_to_html()
elif cmd == "serve":
keras_io.serve()
elif cmd == "add_example":
keras_io.add_example(
sys.argv[2],
working_dir=get_working_dir(sys.argv[3]) if len(sys.argv) == 4 else None,
)
elif cmd == "add_guide":
tutobooks.MAX_LOC = 500
keras_io.add_guide(
sys.argv[2],
working_dir=get_working_dir(sys.argv[3]) if len(sys.argv) == 4 else None,
)
| keras-io/scripts/autogen.py/0 | {
"file_path": "keras-io/scripts/autogen.py",
"repo_id": "keras-io",
"token_count": 23137
} | 127 |
# About Keras 3
Keras is a deep learning API written in Python and capable of running on top of either [JAX](https://jax.readthedocs.io/),
[TensorFlow](https://github.com/tensorflow/tensorflow),
or [PyTorch](https://pytorch.org/).
Keras is:
- **Simple** -- but not simplistic. Keras reduces developer *cognitive load* to free you to focus on the parts of the problem that really matter.
- **Flexible** -- Keras adopts the principle of *progressive disclosure of complexity*: simple workflows should be quick and easy,
while arbitrarily advanced workflows should be *possible* via a clear path that builds upon what you've already learned.
- **Powerful** -- Keras provides industry-strength performance and scalability: it is used by organizations including NASA, YouTube, and Waymo.
---
## Keras 3 is a multi-framework deep learning API
As a multi-framework API, Keras can be used to develop modular components that are compatible with any framework -- JAX, TensorFlow, or PyTorch.
This approach has several key benefits:
- **Always get the best performance for your models.** In our benchmarks,
we found that JAX typically delivers the best training and inference performance
on GPU, TPU, and CPU -- but results vary from model to model, as non-XLA
TensorFlow is occasionally faster on GPU. The ability to dynamically select
the backend that will deliver the best performance for your model
*without having to change anything in your code* means you're always guaranteed
to train and serve with the highest achievable efficiency.
- **Maximize available ecosystem surface for your models.** Any Keras
model can be instantiated as a PyTorch `Module`, can be exported as a TensorFlow
`SavedModel`, or can be instantiated as a stateless JAX function. That means
that you can use your Keras models with PyTorch ecosystem packages,
with the full range of TensorFlow deployment & production tools, and with
JAX large-scale TPU training infrastructure. Write one `model.py` using
Keras APIs, and get access to everything the ML world has to offer.
- **Maximize distribution for your open-source model releases.** Want to
release a pretrained model? Want as many people as possible
to be able to use it? If you implement it in pure TensorFlow or PyTorch,
it will be usable by roughly half of the market.
If you implement it in Keras, it is instantly usable by anyone regardless
of their framework of choice (even if they're not Keras users).
Twice the impact at no added development cost.
- **Use data pipelines from any source.** The Keras
`fit()`/`evaluate()`/`predict()` routines are compatible with `tf.data.Dataset` objects,
with PyTorch `DataLoader` objects, with NumPy arrays, Pandas dataframes --
regardless of the backend you're using. You can train a Keras + TensorFlow
model on a PyTorch `DataLoader` or train a Keras + PyTorch model on a
`tf.data.Dataset`.
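To make that last point concrete, here is a minimal sketch (not part of the original text; the data is random and the shapes are chosen purely for illustration) of fitting a small Keras model directly on a PyTorch `DataLoader`:
```python
import numpy as np
import torch
import keras
# Toy regression data: 256 samples with 8 features and a scalar target.
features = torch.tensor(np.random.rand(256, 8), dtype=torch.float32)
targets = torch.tensor(np.random.rand(256, 1), dtype=torch.float32)
dataset = torch.utils.data.TensorDataset(features, targets)
loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
model = keras.Sequential([
    keras.layers.Dense(16, activation="relu"),
    keras.layers.Dense(1),
])
model.compile(optimizer="adam", loss="mse")
# `fit()` consumes the DataLoader directly, whichever backend is configured.
model.fit(loader, epochs=2)
```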
---
## First contact with Keras
The core data structures of Keras are __layers__ and __models__.
The simplest type of model is the [`Sequential` model](/guides/sequential_model/), a linear stack of layers.
For more complex architectures, you should use the [Keras functional API](/guides/functional_api/),
which lets you build arbitrary graphs of layers, or [write models entirely from scratch via subclassing](/guides/making_new_layers_and_models_via_subclassing/).
Here is the `Sequential` model:
```python
import keras
model = keras.Sequential()
```
Stacking layers is as easy as `.add()`:
```python
from keras import layers
model.add(layers.Dense(units=64, activation='relu'))
model.add(layers.Dense(units=10, activation='softmax'))
```
Once your model looks good, configure its learning process with `.compile()`:
```python
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
```
If you need to, you can further configure your optimizer. The Keras philosophy is to keep simple things simple,
while allowing the user to be fully in control when they need to
(the ultimate control being the easy extensibility of the source code via subclassing).
```python
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True))
```
You can now iterate on your training data in batches:
```python
# x_train and y_train are Numpy arrays
model.fit(x_train, y_train, epochs=5, batch_size=32)
```
Evaluate your test loss and metrics in one line:
```python
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
```
Or generate predictions on new data:
```python
classes = model.predict(x_test, batch_size=128)
```
What you just saw is the most elementary way to use Keras.
However, Keras is also a highly flexible framework suitable for iterating on state-of-the-art research ideas.
Keras follows the principle of **progressive disclosure of complexity**: it makes it easy to get started,
yet it makes it possible to handle arbitrarily advanced use cases,
only requiring incremental learning at each step.
In much the same way that you were able to train and evaluate a simple neural network above in a few lines,
you can use Keras to quickly develop new training procedures or state-of-the-art model architectures.
Here's an example of a custom Keras layer -- which can be used in low-level
workflows in JAX, TensorFlow, or PyTorch, interchangeably:
```python
import keras
from keras import ops
class TokenAndPositionEmbedding(keras.Layer):
def __init__(self, max_length, vocab_size, embed_dim):
super().__init__()
self.token_embed = self.add_weight(
shape=(vocab_size, embed_dim),
initializer="random_uniform",
trainable=True,
)
self.position_embed = self.add_weight(
shape=(max_length, embed_dim),
initializer="random_uniform",
trainable=True,
)
def call(self, token_ids):
# Embed positions
length = token_ids.shape[-1]
positions = ops.arange(0, length, dtype="int32")
positions_vectors = ops.take(self.position_embed, positions, axis=0)
# Embed tokens
token_ids = ops.cast(token_ids, dtype="int32")
token_vectors = ops.take(self.token_embed, token_ids, axis=0)
# Sum both
embed = token_vectors + positions_vectors
# Normalize embeddings
power_sum = ops.sum(ops.square(embed), axis=-1, keepdims=True)
return embed / ops.sqrt(ops.maximum(power_sum, 1e-7))
```
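As a quick usage sketch (continuing the snippet above; the dimensions below are arbitrary and not part of the original example):
```python
# Embed a dummy batch of token ids with the layer defined above.
layer = TokenAndPositionEmbedding(max_length=64, vocab_size=1000, embed_dim=32)
token_ids = ops.ones((2, 64), dtype="int32")
vectors = layer(token_ids)
print(vectors.shape)  # (2, 64, 32)
```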
For more in-depth tutorials about Keras, you can check out:
- [Introduction to Keras for engineers](/getting_started/intro_to_keras_for_engineers/)
- [Developer guides](/guides/)
---
## Support
You can ask questions and join the development discussion on the [Keras Google group](https://groups.google.com/forum/#!forum/keras-users).
You can also post **bug reports and feature requests** (only) in [GitHub issues](https://github.com/keras-team/keras/issues).
Make sure to read [our guidelines](https://github.com/keras-team/keras-io/blob/master/templates/contributing.md) first.
---
## Why this name, Keras?
Keras (κέρας) means _horn_ in ancient Greek. It is a reference to a literary image from ancient Greek and Latin literature, first found in the _Odyssey_, where dream spirits (_Oneiroi_, singular _Oneiros_) are divided between those who deceive dreamers with false visions, who arrive to Earth through a gate of ivory, and those who announce a future that will come to pass, who arrive through a gate of horn. It's a play on the words κέρας (horn) / κραίνω (fulfill), and ἐλέφας (ivory) / ἐλεφαίρομαι (deceive).
Keras was initially developed as part of the research effort of project ONEIROS (Open-ended Neuro-Electronic Intelligent Robot Operating System).
>_"Oneiroi are beyond our unravelling - who can be sure what tale they tell? Not all that men look for comes to pass. Two gates there are that give passage to fleeting Oneiroi; one is made of horn, one of ivory. The Oneiroi that pass through sawn ivory are deceitful, bearing a message that will not be fulfilled; **those that come out through polished horn have truth behind them, to be accomplished for men who see them.**"_ Homer, Odyssey 19. 562 ff (Shewring translation).
| keras-io/templates/about.md/0 | {
"file_path": "keras-io/templates/about.md",
"repo_id": "keras-io",
"token_count": 2547
} | 128 |
# KerasNLP Models
KerasNLP contains end-to-end implementations of popular model architectures.
These models can be created in two ways:
- Through the `from_preset()` constructor, which instantiates an object with
a pre-trained configuration, vocabulary, and (optionally) weights.
- Through custom configuration controlled by the user.
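For instance, the second path might look like the sketch below (the hyperparameter values are illustrative only, not a recommended configuration):
```python
import keras_nlp
# A small, custom-configured BERT backbone with randomly initialized weights.
backbone = keras_nlp.models.BertBackbone(
    vocabulary_size=30522,
    num_layers=4,
    num_heads=4,
    hidden_dim=256,
    intermediate_dim=512,
    max_sequence_length=128,
)
```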
Below, we list all presets available in the library. For more detailed usage,
browse the docstring for a particular class. For an in-depth introduction
to our API, see the [getting started guide](/guides/keras_nlp/getting_started/).
## Backbone presets
The following preset names correspond to a configuration, weights and vocabulary
for a model **backbone**. These presets are not inference-ready, and must be
fine-tuned for a given task!
The names below can be used with any `from_preset()` constructor for a given model.
```python
classifier = keras_nlp.models.BertClassifier.from_preset("bert_tiny_en_uncased")
backbone = keras_nlp.models.BertBackbone.from_preset("bert_tiny_en_uncased")
tokenizer = keras_nlp.models.BertTokenizer.from_preset("bert_tiny_en_uncased")
preprocessor = keras_nlp.models.BertPreprocessor.from_preset("bert_tiny_en_uncased")
```
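As a sketch of the fine-tuning step these backbone presets require (the two sentences and labels below are invented for illustration):
```python
import keras_nlp
features = ["The quick brown fox jumped.", "I forgot my homework."]
labels = [0, 2]
classifier = keras_nlp.models.BertClassifier.from_preset(
    "bert_tiny_en_uncased",
    num_classes=3,
)
classifier.fit(x=features, y=labels, batch_size=2)
```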
{{backbone_presets_table}}
**Note**: The links provided will lead to the model card or to the official README,
if no model card has been provided by the author.
## Classification presets
The following preset names correspond to a configuration, weights and vocabulary
for a model **classifier**. These models are inference ready, but can be further
fine-tuned if desired.
The names below can be used with the `from_preset()` constructor for classifier models
and preprocessing layers.
```python
classifier = keras_nlp.models.BertClassifier.from_preset("bert_tiny_en_uncased_sst2")
tokenizer = keras_nlp.models.BertTokenizer.from_preset("bert_tiny_en_uncased_sst2")
preprocessor = keras_nlp.models.BertPreprocessor.from_preset("bert_tiny_en_uncased_sst2")
```
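Since these presets are already fine-tuned, inference can be as simple as the following sketch (the input sentence is invented for illustration):
```python
import keras_nlp
classifier = keras_nlp.models.BertClassifier.from_preset("bert_tiny_en_uncased_sst2")
# Returns per-class scores for each input string.
preds = classifier.predict(["What an amazing movie!"])
```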
{{classifier_presets_table}}
**Note**: The links provided will lead to the model card or to the official README,
if no model card has been provided by the author.
## API Documentation
{{toc}}
| keras-io/templates/api/keras_nlp/models/index.md/0 | {
"file_path": "keras-io/templates/api/keras_nlp/models/index.md",
"repo_id": "keras-io",
"token_count": 641
} | 129 |
# Probabilistic losses
Losses estimating distances between probability distributions.
{{autogenerated}} | keras-io/templates/api/losses/probabilistic_losses/index.md/0 | {
"file_path": "keras-io/templates/api/losses/probabilistic_losses/index.md",
"repo_id": "keras-io",
"token_count": 24
} | 130 |
# Getting started with Keras
## Learning resources
Are you a machine learning engineer looking for a Keras introduction one-pager?
Read our guide [Introduction to Keras for engineers](/getting_started/intro_to_keras_for_engineers/).
Want to learn more about Keras 3 and its capabilities? See the [Keras 3 launch announcement](/keras_3/).
Are you looking for detailed guides covering in-depth usage of different parts of the Keras API?
Read our [Keras developer guides](/guides/).
Are you looking for tutorials showing Keras in action across a wide range of use cases?
See the [Keras code examples](/examples/): over 150 well-explained notebooks demonstrating Keras best practices
in computer vision, natural language processing, and generative AI.
---
## Installing Keras 3
You can install Keras from PyPI via:
```
pip install --upgrade keras
```
You can check your local Keras version number via:
```python
import keras
print(keras.__version__)
```
To use Keras 3, you will also need to install a backend framework -- either JAX, TensorFlow, or PyTorch:
- [Installing JAX](https://jax.readthedocs.io/en/latest/installation.html)
- [Installing TensorFlow](https://www.tensorflow.org/install)
- [Installing PyTorch](https://pytorch.org/get-started/locally/)
If you install TensorFlow, critically, **you should reinstall Keras 3 afterwards**.
This is a temporary step while TensorFlow is pinned to Keras 2, and will no longer be necessary after TensorFlow 2.16.
The cause is that `tensorflow==2.15` will overwrite your Keras installation with `keras==2.15`.
### Installing KerasCV and KerasNLP
KerasCV and KerasNLP can be installed via pip:
```
pip install --upgrade keras-cv
pip install --upgrade keras-nlp
pip install --upgrade keras
```
Critically, **you should reinstall Keras 3 after installing KerasNLP**.
This is a temporary step while TensorFlow is pinned to Keras 2, and will no longer be necessary after TensorFlow 2.16.
The cause is that `keras-nlp` depends on `tensorflow-text`, which will install `tensorflow==2.15`, which will
overwrite your Keras installation with `keras==2.15`.
---
## Configuring your backend
You can export the environment variable `KERAS_BACKEND`
or you can edit your local config file at `~/.keras/keras.json` to configure your backend.
Available backend options are: `"jax"`, `"tensorflow"`, `"torch"`. Example:
```
export KERAS_BACKEND="jax"
```
In Colab, you can do:
```python
import os
os.environ["KERAS_BACKEND"] = "jax"
import keras
```
**Note:** The backend must be configured before importing Keras, and the backend cannot be changed after the package has been imported.
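To confirm which backend is active after the import, one option is the check below (assuming `keras.config.backend()` is available, as in Keras 3):
```python
import os
os.environ["KERAS_BACKEND"] = "jax"
import keras
print(keras.config.backend())  # Expected to print "jax" given the variable set above.
```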
### GPU dependencies
#### Colab or Kaggle
If you are running on Colab or Kaggle, the GPU should already be configured, with the correct CUDA version.
Installing a newer version of CUDA on Colab or Kaggle is typically not possible. Even though pip installers exist,
they rely on a pre-installed NVIDIA driver and there is no way to update the driver on Colab or Kaggle.
#### Universal GPU environment
If you want to attempt to create a "universal environment" where any backend can use the GPU, we recommend following
[the dependency versions used by Colab](https://colab.sandbox.google.com/drive/13cpd3wCwEHpsmypY9o6XB6rXgBm5oSxu)
(which seeks to solve this exact problem). You can install the CUDA driver [from here](https://developer.nvidia.com/cuda-downloads),
then pip install backends by following their respective CUDA installation instructions:
[Installing JAX](https://jax.readthedocs.io/en/latest/installation.html),
[Installing TensorFlow](https://www.tensorflow.org/install),
[Installing PyTorch](https://pytorch.org/get-started/locally/)
#### Most stable GPU environment
This setup is recommended if you are a Keras contributor and are running Keras tests. It installs all backends but only
gives GPU access to one backend at a time, avoiding potentially conflicting dependency requirements between backends.
You can use the following backend-specific requirements files:
- [requirements-jax-cuda.txt](https://github.com/keras-team/keras/blob/master/requirements-jax-cuda.txt)
- [requirements-tensorflow-cuda.txt](https://github.com/keras-team/keras/blob/master/requirements-tensorflow-cuda.txt)
- [requirements-torch-cuda.txt](https://github.com/keras-team/keras/blob/master/requirements-torch-cuda.txt)
These install all CUDA-enabled dependencies via pip. They expect an NVIDIA driver to be preinstalled.
We recommend a clean Python environment for each backend to avoid CUDA version mismatches.
As an example, here is how to create a JAX GPU environment with [Conda](https://docs.conda.io/en/latest/):
```
conda create -y -n keras-jax python=3.10
conda activate keras-jax
pip install -r requirements-jax-cuda.txt
pip install --upgrade keras
```
---
## TensorFlow + Keras 2 backwards compatibility
From TensorFlow 2.0 to TensorFlow 2.15 (included), doing `pip install tensorflow` will also
install the corresponding version of Keras 2 -- for instance, `pip install tensorflow==2.14.0` will
install `keras==2.14.0`. That version of Keras is then available via both `import keras` and `from tensorflow import keras`
(the `tf.keras` namespace).
Starting with TensorFlow 2.16, doing `pip install tensorflow` will install Keras 3. When you have TensorFlow >= 2.16
and Keras 3, then by default `from tensorflow import keras` (`tf.keras`) will be Keras 3.
Meanwhile, the legacy Keras 2 package is still being released regularly and is available on PyPI as `tf_keras`
(or equivalently `tf-keras` -- note that `-` and `_` are equivalent in PyPI package names).
To use it, you can install it via `pip install tf_keras` then import it via `import tf_keras as keras`.
Should you want `tf.keras` to stay on Keras 2 after upgrading to TensorFlow 2.16+, you can configure your TensorFlow installation
so that `tf.keras` points to `tf_keras`. To achieve this:
1. Make sure to install `tf_keras`. Note that TensorFlow does not install it by default.
2. Export the environment variable `TF_USE_LEGACY_KERAS=1`.
There are several ways to export the environment variable:
1. You can simply run the shell command `export TF_USE_LEGACY_KERAS=1` before launching the Python interpreter.
2. You can add `export TF_USE_LEGACY_KERAS=1` to your `.bashrc` file. That way the variable will still be exported when you restart your shell.
3. You can start your Python script with:
```python
import os
os.environ["TF_USE_LEGACY_KERAS"] = "1"
```
These lines would need to be before any `import tensorflow` statement.
---
## Compatibility matrix
### JAX compatibility
The following Keras + JAX versions are compatible with each other:
- `jax==0.4.20` & `keras==3.0.0`
### TensorFlow compatibility
The following Keras + TensorFlow versions are compatible with each other:
To use Keras 2:
- `tensorflow==2.13.0` & `keras==2.13.0`
- `tensorflow==2.14.0` & `keras==2.14.0`
- `tensorflow==2.15.0` & `keras==2.15.0`
To use Keras 3:
- `tensorflow==2.15.0` & `keras==3.0.0`
- `tensorflow==2.16.0` & `keras==3.0.0`
### PyTorch compatibility
The following Keras + PyTorch versions are compatible with each other:
- `torch==2.1.0` & `keras==3.0.0`
| keras-io/templates/getting_started/index.md/0 | {
"file_path": "keras-io/templates/getting_started/index.md",
"repo_id": "keras-io",
"token_count": 2229
} | 131 |
.codehilite {
padding: 1rem;
margin-bottom: 1rem;
padding-bottom: 0.01rem;
font-size: 0.92rem;
}
@media screen and (max-width: 840px) {
.codehilite {
font-size: 0.85rem;
}
}
.codehilite pre {
color: #ced4da;
}
.k-default-codeblock .codehilite pre {
color: #444;
}
.k-default-codeblock .codehilite code span {
color: #444 !important;
}
.k-default-codeblock .codehilite {
color: #444 !important;
background-color: #eee !important;
}
.codehilite .hll { background-color: #49483e }
.codehilite { background: #000000; color: #ffffff }
.codehilite .c { color: #75715e } /* Comment */
.codehilite .err { color: #b38aff } /* Error */
.codehilite .k { color: #66d9ef } /* Keyword */
.codehilite .l { color: #ae81ff } /* Literal */
.codehilite .n { color: #f8f8f2 } /* Name */
.codehilite .o { color: #f92672 } /* Operator */
.codehilite .p { color: #f8f8f2 } /* Punctuation */
.codehilite .ch { color: #908d81 } /* Comment.Hashbang */
.codehilite .cm { color: #908d81 } /* Comment.Multiline */
.codehilite .cp { color: #908d81 } /* Comment.Preproc */
.codehilite .cpf { color: #908d81 } /* Comment.PreprocFile */
.codehilite .c1 { color: #908d81 } /* Comment.Single */
.codehilite .cs { color: #908d81 } /* Comment.Special */
.codehilite .gd { color: #f92672 } /* Generic.Deleted */
.codehilite .ge { font-style: italic } /* Generic.Emph */
.codehilite .gi { color: #a6e22e } /* Generic.Inserted */
.codehilite .gs { font-weight: bold } /* Generic.Strong */
.codehilite .gu { color: #75715e } /* Generic.Subheading */
.codehilite .kc { color: #66d9ef } /* Keyword.Constant */
.codehilite .kd { color: #66d9ef } /* Keyword.Declaration */
.codehilite .kn { color: #ff4185 } /* Keyword.Namespace */
.codehilite .kp { color: #66d9ef } /* Keyword.Pseudo */
.codehilite .kr { color: #66d9ef } /* Keyword.Reserved */
.codehilite .kt { color: #66d9ef } /* Keyword.Type */
.codehilite .ld { color: #e6db74 } /* Literal.Date */
.codehilite .m { color: #ae81ff } /* Literal.Number */
.codehilite .s { color: #e6db74 } /* Literal.String */
.codehilite .na { color: #a6e22e } /* Name.Attribute */
.codehilite .nb { color: #f8f8f2 } /* Name.Builtin */
.codehilite .nc { color: #a6e22e } /* Name.Class */
.codehilite .no { color: #66d9ef } /* Name.Constant */
.codehilite .nd { color: #a6e22e } /* Name.Decorator */
.codehilite .ni { color: #f8f8f2 } /* Name.Entity */
.codehilite .ne { color: #a6e22e } /* Name.Exception */
.codehilite .nf { color: #a6e22e } /* Name.Function */
.codehilite .nl { color: #f8f8f2 } /* Name.Label */
.codehilite .nn { color: #ffffff } /* Name.Namespace */
.codehilite .nx { color: #a6e22e } /* Name.Other */
.codehilite .py { color: #f8f8f2 } /* Name.Property */
.codehilite .nt { color: #f92672 } /* Name.Tag */
.codehilite .nv { color: #f8f8f2 } /* Name.Variable */
.codehilite .ow { color: #f92672 } /* Operator.Word */
.codehilite .w { color: #f8f8f2 } /* Text.Whitespace */
.codehilite .mb { color: #ae81ff } /* Literal.Number.Bin */
.codehilite .mf { color: #ae81ff } /* Literal.Number.Float */
.codehilite .mh { color: #ae81ff } /* Literal.Number.Hex */
.codehilite .mi { color: #ae81ff } /* Literal.Number.Integer */
.codehilite .mo { color: #ae81ff } /* Literal.Number.Oct */
.codehilite .sa { color: #e6db74 } /* Literal.String.Affix */
.codehilite .sb { color: #e6db74 } /* Literal.String.Backtick */
.codehilite .sc { color: #e6db74 } /* Literal.String.Char */
.codehilite .dl { color: #e6db74 } /* Literal.String.Delimiter */
.codehilite .sd { color: #e6db74 } /* Literal.String.Doc */
.codehilite .s2 { color: #e6db74 } /* Literal.String.Double */
.codehilite .se { color: #ae81ff } /* Literal.String.Escape */
.codehilite .sh { color: #e6db74 } /* Literal.String.Heredoc */
.codehilite .si { color: #e6db74 } /* Literal.String.Interpol */
.codehilite .sx { color: #e6db74 } /* Literal.String.Other */
.codehilite .sr { color: #e6db74 } /* Literal.String.Regex */
.codehilite .s1 { color: #e6db74 } /* Literal.String.Single */
.codehilite .ss { color: #e6db74 } /* Literal.String.Symbol */
.codehilite .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
.codehilite .fm { color: #a6e22e } /* Name.Function.Magic */
.codehilite .vc { color: #f8f8f2 } /* Name.Variable.Class */
.codehilite .vg { color: #f8f8f2 } /* Name.Variable.Global */
.codehilite .vi { color: #f8f8f2 } /* Name.Variable.Instance */
.codehilite .vm { color: #f8f8f2 } /* Name.Variable.Magic */
.codehilite .il { color: #ae81ff } /* Literal.Number.Integer.Long */
| keras-io/theme/css/monokai.css/0 | {
"file_path": "keras-io/theme/css/monokai.css",
"repo_id": "keras-io",
"token_count": 1887
} | 132 |
{
"dockerFile": "Dockerfile",
"customizations": {
"vscode": {
"settings": {
"python.linting.enabled": true,
"python.linting.flake8Enabled": true,
"python.linting.pylintEnabled": false,
"python.testing.pytestEnabled": true,
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": true
},
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
},
"editor.rulers": [
80
]
},
"extensions": [
"ms-python.python",
"ms-python.isort",
"ms-python.flake8",
"ms-python.black-formatter"
]
}
},
"features": {
"ghcr.io/devcontainers/features/github-cli:1": {}
},
"postCreateCommand": "sh /setup.sh"
}
| keras-nlp/.devcontainer/devcontainer.json/0 | {
"file_path": "keras-nlp/.devcontainer/devcontainer.json",
"repo_id": "keras-nlp",
"token_count": 613
} | 133 |
# Code of Conduct
This project follows
[Google's Open Source Community Guidelines](https://opensource.google/conduct/).
| keras-nlp/CODE_OF_CONDUCT.md/0 | {
"file_path": "keras-nlp/CODE_OF_CONDUCT.md",
"repo_id": "keras-nlp",
"token_count": 29
} | 134 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split sentences from raw input documents using nltk.
A script to sentence split a raw dataset (e.g. wikipedia or bookscorpus) into
sentences for further preproessing for BERT. The output file format is the
format expected by `create_pretraining_data.py`, where each file contains one
line per sentence, with empty newlines between documents.
This script will run muliprocessed, and the number of concurrent process and
output file shards can be controlled with `--num_jobs` and `--num_shards`.
Usage:
python examples/tools/create_sentence_split_data.py \
--input_files ~/datasets/wikipedia,~/datasets/bookscorpus \
--output_directory ~/datasets/bert-sentence-split-data
"""
import contextlib
import multiprocessing
import os
import random
import sys
import nltk
from absl import app
from absl import flags
from tensorflow import keras
from examples.utils.scripting_utils import list_filenames_for_arg
FLAGS = flags.FLAGS
flags.DEFINE_string(
"input_files",
None,
"Comma seperated list of directories, files, or globs for input data.",
)
flags.DEFINE_string(
"output_directory",
None,
"Directory for output data.",
)
flags.DEFINE_integer("num_jobs", None, "Number of file shards to use.")
flags.DEFINE_integer("num_shards", 500, "Number of file shards to use.")
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
def parse_wiki_file(file):
"""Read documents from a wikipedia dump file."""
documents = []
in_article = False
article_lines = []
for line in file:
line = line.strip()
# Skip empty lines.
if line == "":
continue
elif "<doc id=" in line:
in_article = True
elif "</doc>" in line:
in_article = False
            # There are many wikipedia articles that are only titles (one
            # line) or redirects (two lines); we will skip these.
if len(article_lines) > 2:
# Skip the title.
documents.append(" ".join(article_lines[1:]))
article_lines = []
elif in_article:
article_lines.append(line)
return documents
def parse_text_file(file):
"""Read documents from a plain text file."""
documents = []
file_lines = []
for line in file:
line = line.strip()
# Skip empty lines.
if line == "":
continue
file_lines.append(line)
documents.append(" ".join(file_lines))
return documents
def read_file(filename):
"""Read documents from an input file."""
with open(filename, mode="r") as file:
firstline = file.readline()
file.seek(0)
# Very basic autodetection of file type.
# Wikipedia dump files all start with a doc id tag.
if "<doc id=" in firstline:
return parse_wiki_file(file)
return parse_text_file(file)
def process_file(filename):
"""Read documents from an input file and split into sentences with nltk."""
split_documents = []
for document in read_file(filename):
sentences = nltk.tokenize.sent_tokenize(document)
split_documents.append(sentences)
return split_documents
def main(_):
nltk.download("punkt")
print(f"Reading input data from {FLAGS.input_files}")
input_filenames = list_filenames_for_arg(FLAGS.input_files)
if not input_filenames:
print("No input files found. Check `input_files` flag.")
sys.exit(1)
# Randomize files so we aren't processing input directories sequentially.
rng = random.Random(FLAGS.random_seed)
rng.shuffle(input_filenames)
# We will read and sentence split with multiprocessing, but write from
# a single thread to balance our shard sizes well.
pool = multiprocessing.Pool(FLAGS.num_jobs)
print(f"Outputting to {FLAGS.output_directory}.")
if not os.path.exists(FLAGS.output_directory):
os.mkdir(FLAGS.output_directory)
progbar = keras.utils.Progbar(len(input_filenames), unit_name="files")
progbar.update(0)
with contextlib.ExitStack() as stack:
# Open all files.
output_files = []
for i in range(FLAGS.num_shards):
path = os.path.join(FLAGS.output_directory, f"shard_{i}.txt")
output_files.append(stack.enter_context(open(path, "w")))
# Write documents to disk.
total_files = 0
total_documents = 0
for documents in pool.imap_unordered(process_file, input_filenames):
for document in documents:
output_file = output_files[total_documents % FLAGS.num_shards]
for sentence in document:
output_file.write(sentence + "\n")
# Blank newline marks a new document.
output_file.write("\n")
total_documents += 1
total_files += 1
progbar.update(total_files)
print("Done.")
print(f"Read {total_files} files.")
print(f"Processed {total_documents} documents.")
if __name__ == "__main__":
flags.mark_flag_as_required("input_files")
flags.mark_flag_as_required("output_directory")
app.run(main)
| keras-nlp/examples/tools/split_sentences.py/0 | {
"file_path": "keras-nlp/examples/tools/split_sentences.py",
"repo_id": "keras-nlp",
"token_count": 2193
} | 135 |