import larq as lq
import tensorflow as tf


def residual_block(x, double_filters=False, filters=None):
    assert not (double_filters and filters)

    # Compute input and output channel counts.
    in_filters = x.get_shape().as_list()[-1]
    out_filters = (filters or in_filters) if not double_filters else 2 * in_filters

    # Shortcut branch: when the channel count changes, downsample with average
    # pooling and widen with a full-precision 1x1 convolution.
    shortcut = x
    if in_filters != out_filters:
        shortcut = tf.keras.layers.AvgPool2D(2, strides=2, padding="same")(shortcut)
        shortcut = tf.keras.layers.Conv2D(
            out_filters, 1, kernel_initializer="glorot_normal", use_bias=False
        )(shortcut)
        shortcut = tf.keras.layers.BatchNormalization(momentum=0.8)(shortcut)

    # Residual branch: a single 3x3 convolution with binarized inputs.
    x = lq.layers.QuantConv2D(
        out_filters,
        3,
        strides=1 if out_filters == in_filters else 2,
        padding="same",
        input_quantizer="approx_sign",
        kernel_quantizer=None,
        kernel_initializer="glorot_normal",
        kernel_constraint=None,
        use_bias=False,
    )(x)
    x = tf.keras.layers.BatchNormalization(momentum=0.8)(x)
    return tf.keras.layers.add([x, shortcut])
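# A minimal sketch of wiring this block into a functional model. The input
# shape and the double_filters setting here are illustrative, not from the
# source; tensorflow and larq are assumed to be installed.
inputs = tf.keras.Input(shape=(32, 32, 64))
outputs = residual_block(inputs, double_filters=True)  # 64 -> 128 channels, stride 2
model = tf.keras.Model(inputs, outputs)
model.summary()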
# The snippet below begins mid-docstring in the source; the enclosing method
# signature is inferred from the names its body uses (self, x, name, downsample).
def residual_block(self, x, name, downsample=False):
    """Channel scaling follows Figure 1 (Right)."""
    in_channels = x.shape[-1]
    out_channels = int(in_channels * 2 if downsample else in_channels)

    # Shortcut, which gets downsampled if necessary.
    shortcut_add = self.shortcut_connection(x, name, in_channels, out_channels)

    # Batch normalization before the binarized convolution.
    conv_input = tf.keras.layers.BatchNormalization(
        momentum=self.momentum, name=f"{name}_batch_norm"
    )(x)

    # Binarized convolution; the kernel regularizer is only applied when the
    # kernel itself is left unquantized.
    conv_output = lq.layers.QuantConv2D(
        out_channels,
        kernel_size=3,
        strides=2 if downsample else 1,
        padding="same",
        input_quantizer=self.input_quantizer,
        kernel_quantizer=self.kernel_quantizer,
        kernel_constraint=self.kernel_constraint,
        kernel_regularizer=self.kernel_regularizer
        if self.kernel_quantizer is None
        else None,
        kernel_initializer=self.kernel_initializer,
        use_bias=False,
        name=f"{name}_conv2d",
    )(conv_input)

    # Binary convolution rescaling (the remainder of this method is not shown
    # in the source).
def binarynet(hparams, input_shape, num_classes):
    kwhparams = dict(
        input_quantizer="ste_sign",
        kernel_quantizer=hparams.kernel_quantizer,
        kernel_constraint=hparams.kernel_constraint,
        use_bias=False,
    )
    return tf.keras.models.Sequential(
        [
            # Don't quantize inputs in the first layer.
            lq.layers.QuantConv2D(
                hparams.filters,
                hparams.kernel_size,
                kernel_quantizer=hparams.kernel_quantizer,
                kernel_constraint=hparams.kernel_constraint,
                use_bias=False,
                input_shape=input_shape,
            ),
            tf.keras.layers.BatchNormalization(scale=False),
            lq.layers.QuantConv2D(
                hparams.filters, hparams.kernel_size, padding="same", **kwhparams
            ),
            tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
            tf.keras.layers.BatchNormalization(scale=False),
            lq.layers.QuantConv2D(
                2 * hparams.filters, hparams.kernel_size, padding="same", **kwhparams
            ),
            # The remaining layers of this model are not shown in the source.
        ]
    )
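# A hedged usage sketch for binarynet. The hparams object and its fields
# (filters, kernel_size, kernel_quantizer, kernel_constraint) are inferred
# from the snippet above; the values chosen here are illustrative.
from types import SimpleNamespace

hparams = SimpleNamespace(
    filters=64,
    kernel_size=3,
    kernel_quantizer="ste_sign",
    kernel_constraint="weight_clip",
)
model = binarynet(hparams, input_shape=(32, 32, 3), num_classes=10)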
# Another variant of the residual block as a class method; the def line is
# missing from the source and is inferred from the body.
def residual_block(self, x, double_filters=False, filters=None):
    in_filters = x.get_shape().as_list()[-1]
    out_filters = (filters or in_filters) if not double_filters else 2 * in_filters

    # Full-precision shortcut: downsample and widen when the shapes differ.
    shortcut = x
    if in_filters != out_filters:
        shortcut = tf.keras.layers.AvgPool2D(2, strides=2, padding="same")(shortcut)
        shortcut = tf.keras.layers.Conv2D(
            out_filters,
            (1, 1),
            kernel_initializer=self.kernel_initializer,
            use_bias=False,
        )(shortcut)
        shortcut = tf.keras.layers.BatchNormalization(momentum=0.8)(shortcut)

    # Binarized 3x3 convolution on the residual branch.
    x = lq.layers.QuantConv2D(
        out_filters,
        (3, 3),
        strides=1 if out_filters == in_filters else 2,
        padding="same",
        input_quantizer=self.input_quantizer,
        kernel_quantizer=self.kernel_quantizer,
        kernel_initializer=self.kernel_initializer,
        kernel_constraint=self.kernel_constraint,
        use_bias=False,
    )(x)
    x = tf.keras.layers.BatchNormalization(momentum=0.8)(x)
    return tf.keras.layers.add([x, shortcut])
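# The shortcut path above, shown in isolation as a shape check (the input
# shape is assumed): average pooling halves the spatial resolution while the
# full-precision 1x1 convolution doubles the channels, so the shortcut ends up
# matching the strided residual branch.
inputs = tf.keras.Input(shape=(32, 32, 64))
shortcut = tf.keras.layers.AvgPool2D(2, strides=2, padding="same")(inputs)
shortcut = tf.keras.layers.Conv2D(128, (1, 1), use_bias=False)(shortcut)
shortcut = tf.keras.layers.BatchNormalization(momentum=0.8)(shortcut)
print(shortcut.shape)  # (None, 16, 16, 128)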
# This fragment begins mid-call in the source; the dangling lines closed a
# batch normalization identical to the ones below, reconstructed here.
x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(x)
x = lq.layers.QuantConv2D(384, (3, 3), padding="same", **quant_conv_kwargs)(x)
x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(x)
x = lq.layers.QuantConv2D(384, (3, 3), padding="same", **quant_conv_kwargs)(x)
x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(x)
x = lq.layers.QuantConv2D(256, (3, 3), padding="same", **quant_conv_kwargs)(x)
x = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(x)
x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(x)
x = lq.layers.QuantConv2D(4096, (6, 6), padding="valid", **quant_conv_kwargs)(x)
x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(x)

if self.include_top:
    # A 1x1 convolution over the 1x1 feature map, equivalent to a dense layer.
    x = lq.layers.QuantConv2D(
        4096, (1, 1), strides=(1, 1), padding="valid", **quant_conv_kwargs
    )(x)
    x = tf.keras.layers.BatchNormalization(
        momentum=0.9, scale=False, epsilon=1e-3
    )(x)
    x = tf.keras.layers.Activation("relu")(x)
    x = tf.keras.layers.Flatten()(x)
    # The Dense call is truncated in the source; only the units argument
    # survives, so it is closed minimally here.
    x = tf.keras.layers.Dense(
        self.num_classes,
    )(x)
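# The "equivalent to a dense layer" trick above relies on the feature map
# being 1x1 at that point: a 1x1 convolution then computes exactly what a
# dense layer would on the flattened features. A small shape check, with an
# assumed channel count:
feat = tf.keras.Input(shape=(1, 1, 4096))
as_conv = tf.keras.layers.Conv2D(1000, (1, 1), padding="valid")(feat)
print(as_conv.shape)  # (None, 1, 1, 1000): one value per "dense" output unit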
from typing import Optional, Tuple, Union


def quant_conv(
    self,
    x: tf.Tensor,
    filters: int,
    kernel: Union[int, Tuple[int, int]],
    strides: Union[int, Tuple[int, int]] = 1,
    name: Optional[str] = None,
) -> tf.Tensor:
    # Helper that applies a binarized convolution with the configured
    # quantizers, constraint, and initializer.
    return lq.layers.QuantConv2D(
        filters,
        kernel,
        strides=strides,
        padding="same",
        use_bias=False,
        input_quantizer=self.input_quantizer,
        kernel_quantizer=self.kernel_quantizer,
        kernel_constraint=self.kernel_constraint,
        kernel_initializer=self.kernel_initializer,
        name=name,
    )(x)
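# As a quick way to verify how helpers like quant_conv compose into a model,
# larq provides lq.models.summary, which reports per-layer bit-widths and
# memory use. The model below is illustrative, not from the source; the first
# layer leaves its inputs unquantized, matching the convention above.
model = tf.keras.models.Sequential(
    [
        lq.layers.QuantConv2D(
            32,
            3,
            padding="same",
            kernel_quantizer="ste_sign",
            kernel_constraint="weight_clip",
            use_bias=False,
            input_shape=(32, 32, 3),
        ),
        tf.keras.layers.BatchNormalization(scale=False),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(10, activation="softmax"),
    ]
)
lq.models.summary(model)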