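# Snippet: weight/bias variable setup and dynamic output-shape computation,
# apparently from TFLearn's conv_2d_transpose (tflearn/layers/conv.py).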
W_init = weights_init
if isinstance(weights_init, str):
    W_init = initializations.get(weights_init)()
elif type(W_init) in [tf.Tensor, np.ndarray, list]:
    filter_size = None
W_regul = None
if regularizer is not None:
    W_regul = lambda x: regularizers.get(regularizer)(x, weight_decay)
W = vs.variable('W', shape=filter_size,
                regularizer=W_regul, initializer=W_init,
                trainable=trainable, restore=restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
b = None
if bias:
    b_shape = [nb_filter]
    if isinstance(bias_init, str):
        bias_init = initializations.get(bias_init)()
    elif type(bias_init) in [tf.Tensor, np.ndarray, list]:
        b_shape = None
    b = vs.variable('b', shape=b_shape, initializer=bias_init,
                    trainable=trainable, restore=restore)
    # Track per layer variables
    tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
# Determine the complete shape of the output tensor.
batch_size = tf.gather(tf.shape(incoming), tf.constant([0]))
if len(output_shape) == 3:
    output_shape = output_shape + [nb_filter]
elif len(output_shape) != 4:
    raise Exception("output_shape length error: "
                    + str(len(output_shape))
                    + ", only a length of 3 or 4 is supported.")
complete_out_shape = tf.concat([batch_size, tf.constant(output_shape)], 0)
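# A minimal sketch of the dynamic-output-shape trick above (TF 1.x graph
# mode assumed; the input and output shapes here are illustrative only).
import tensorflow as tf

incoming = tf.placeholder(tf.float32, [None, 8, 8, 16])
output_shape = [16, 16, 32]  # H, W, C -- no batch dimension yet
batch_size = tf.gather(tf.shape(incoming), tf.constant([0]))
complete_out_shape = tf.concat([batch_size, tf.constant(output_shape)], 0)
# At run time complete_out_shape evaluates to [batch, 16, 16, 32].

# Snippet: fully-connected layer variables and inference, apparently
# TFLearn's fully_connected (tflearn/layers/core.py).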
W_init = weights_init
if isinstance(weights_init, str):
    W_init = initializations.get(weights_init)()
elif type(W_init) in [tf.Tensor, np.ndarray, list]:
    filter_size = None
W_regul = None
if regularizer is not None:
    W_regul = lambda x: regularizers.get(regularizer)(x, weight_decay)
W = va.variable('W', shape=filter_size, regularizer=W_regul,
                initializer=W_init, trainable=trainable,
                restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
b = None
if bias:
    b_shape = [n_units]
    if isinstance(bias_init, str):
        bias_init = initializations.get(bias_init)()
    elif type(bias_init) in [tf.Tensor, np.ndarray, list]:
        b_shape = None
    b = va.variable('b', shape=b_shape, initializer=bias_init,
                    trainable=trainable, restore=restore)
    tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
inference = incoming
# If input is not 2d, flatten it.
if len(input_shape) > 2:
    inference = tf.reshape(inference, [-1, n_inputs])
inference = tf.matmul(inference, W)
if b is not None: inference = tf.nn.bias_add(inference, b)
if activation:
    if isinstance(activation, str):
        inference = activations.get(activation)(inference)
    elif hasattr(activation, '__call__'):
        inference = activation(inference)
    else:
        raise ValueError("Invalid Activation.")
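# Snippet: highway convolutional layer variables, apparently TFLearn's
# highway_conv_1d/highway_conv_2d (tflearn/layers/conv.py). A highway layer
# computes y = H(x, W) * T(x, W_T) + x * (1 - T(x, W_T)), where T is a
# sigmoid transform gate; initializing b_T to -3 keeps T near 0 at first,
# biasing the layer toward carrying its input through
# (https://arxiv.org/abs/1505.00387).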
name = scope.name
W_init = weights_init
if isinstance(weights_init, str):
    W_init = initializations.get(weights_init)()
W_regul = None
if regularizer is not None:
    W_regul = lambda x: regularizers.get(regularizer)(x, weight_decay)
W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                initializer=W_init, trainable=trainable,
                restore=restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
if isinstance(bias_init, str):
    bias_init = initializations.get(bias_init)()
b = vs.variable('b', shape=nb_filter, initializer=bias_init,
                trainable=trainable, restore=restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
# Weight and bias for the transform gate
W_T = vs.variable('W_T', shape=nb_filter,
                  regularizer=None, initializer=W_init,
                  trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W_T)
b_T = vs.variable('b_T', shape=nb_filter,
                  initializer=tf.constant_initializer(-3),
                  trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b_T)
"""
input_shape = utils.get_incoming_shape(incoming)
assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"
filter_size = utils.autoformat_filter_conv2d(filter_size,
                                             input_shape[-1],
                                             nb_filter)
strides = utils.autoformat_kernel_2d(strides)
padding = utils.autoformat_padding(padding)
with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
    name = scope.name
    W_init = weights_init
    if isinstance(weights_init, str):
        W_init = initializations.get(weights_init)()
    W_regul = None
    if regularizer:
        W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
    W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                    initializer=W_init, trainable=trainable,
                    restore=restore)
    # Track per layer variables
    tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
    b = None
    if bias:
        if isinstance(bias_init, str):
            bias_init = initializations.get(bias_init)()
        b = vs.variable('b', shape=nb_filter, initializer=bias_init,
                        trainable=trainable, restore=restore)
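# Snippet: 3-D convolution layer variables and inference, apparently
# TFLearn's conv_3d.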
W_init = weights_init
if isinstance(weights_init, str):
    W_init = initializations.get(weights_init)()
elif type(W_init) in [tf.Tensor, np.ndarray, list]:
    filter_size = None
W_regul = None
if regularizer is not None:
    W_regul = lambda x: regularizers.get(regularizer)(x, weight_decay)
W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                initializer=W_init, trainable=trainable,
                restore=restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
b = None
if bias:
    b_shape = [nb_filter]
    if isinstance(bias_init, str):
        bias_init = initializations.get(bias_init)()
    elif type(bias_init) in [tf.Tensor, np.ndarray, list]:
        b_shape = None
    b = vs.variable('b', shape=b_shape, initializer=bias_init,
                    trainable=trainable, restore=restore)
    # Track per layer variables
    tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
inference = tf.nn.conv3d(incoming, W, strides, padding)
if b is not None: inference = tf.nn.bias_add(inference, b)
if isinstance(activation, str):
    inference = activations.get(activation)(inference)
elif hasattr(activation, '__call__'):
    inference = activation(inference)
else:
    raise ValueError("Invalid Activation.")
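# Snippet: 1-D convolution, apparently TFLearn's conv_1d. A dummy second
# dimension is added so the 1-D case can reuse tf.nn.conv2d, then squeezed
# back out after the convolution.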
W_init = weights_init
if isinstance(weights_init, str):
    W_init = initializations.get(weights_init)()
elif type(W_init) in [tf.Tensor, np.ndarray, list]:
    filter_size = None
W_regul = None
if regularizer is not None:
    W_regul = lambda x: regularizers.get(regularizer)(x, weight_decay)
W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                initializer=W_init, trainable=trainable,
                restore=restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
b = None
if bias:
    b_shape = [nb_filter]
    if isinstance(bias_init, str):
        bias_init = initializations.get(bias_init)()
    elif type(bias_init) in [tf.Tensor, np.ndarray, list]:
        b_shape = None
    b = vs.variable('b', shape=b_shape, initializer=bias_init,
                    trainable=trainable, restore=restore)
    # Track per layer variables
    tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
# Adding dummy dimension to fit with Tensorflow conv2d
inference = tf.expand_dims(incoming, 2)
inference = tf.nn.conv2d(inference, W, strides, padding)
if b is not None: inference = tf.nn.bias_add(inference, b)
inference = tf.squeeze(inference, [2])
if isinstance(activation, str):
    inference = activations.get(activation)(inference)
elif hasattr(activation, '__call__'):
    inference = activation(inference)
else:
    raise ValueError("Invalid Activation.")
Links:
    [https://arxiv.org/abs/1505.00387](https://arxiv.org/abs/1505.00387)
"""
input_shape = utils.get_incoming_shape(incoming)
assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
n_inputs = int(np.prod(input_shape[1:]))
# Build variables and inference.
with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
    name = scope.name
    W_init = weights_init
    if isinstance(weights_init, str):
        W_init = initializations.get(weights_init)()
    W_regul = None
    if regularizer:
        W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
    W = va.variable('W', shape=[n_inputs, n_units], regularizer=W_regul,
                    initializer=W_init, trainable=trainable,
                    restore=restore)
    tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
    if isinstance(bias_init, str):
        bias_init = initializations.get(bias_init)()
    b = va.variable('b', shape=[n_units], initializer=bias_init,
                    trainable=trainable, restore=restore)
    tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
    # Weight and bias for the transform gate
    W_T = va.variable('W_T', shape=[n_inputs, n_units],
                      regularizer=None, initializer=W_init,
                      trainable=trainable, restore=restore)
"""
input_shape = utils.get_incoming_shape(incoming)
assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"
filter_size = utils.autoformat_filter_conv2d(filter_size,
                                             nb_filter,
                                             input_shape[-1])
strides = utils.autoformat_kernel_2d(strides)
padding = utils.autoformat_padding(padding)
with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
    name = scope.name
    W_init = weights_init
    if isinstance(weights_init, str):
        W_init = initializations.get(weights_init)()
    W_regul = None
    if regularizer:
        W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
    W = vs.variable('W', shape=filter_size,
                    regularizer=W_regul, initializer=W_init,
                    trainable=trainable, restore=restore)
    # Track per layer variables
    tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
    b = None
    if bias:
        if isinstance(bias_init, str):
            bias_init = initializations.get(bias_init)()
        b = vs.variable('b', shape=nb_filter, initializer=bias_init,
                        trainable=trainable, restore=restore)
        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
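# Snippet: 2-D convolution variables and inference, apparently TFLearn's
# conv_2d: convolve, add bias, then apply the activation.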
W_init = weights_init
if isinstance(weights_init, str):
    W_init = initializations.get(weights_init)()
W_regul = None
if regularizer:
    W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                initializer=W_init, trainable=trainable,
                restore=restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
b = None
if bias:
    if isinstance(bias_init, str):
        bias_init = initializations.get(bias_init)()
    b = vs.variable('b', shape=nb_filter, initializer=bias_init,
                    trainable=trainable, restore=restore)
    # Track per layer variables
    tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
inference = tf.nn.conv2d(incoming, W, strides, padding)
if b is not None: inference = tf.nn.bias_add(inference, b)
if isinstance(activation, str):
    inference = activations.get(activation)(inference)
elif hasattr(activation, '__call__'):
    inference = activation(inference)
else:
    raise ValueError("Invalid Activation.")
# Track activations.
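# A minimal end-to-end usage sketch, assuming these snippets are TFLearn
# layers; the network below (shapes and hyperparameters included) is
# illustrative, not part of the original code.
import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d

net = input_data(shape=[None, 28, 28, 1])        # batch of 28x28 grayscale images
net = conv_2d(net, nb_filter=32, filter_size=3,  # conv_2d as defined above
              activation='relu', regularizer='L2', weight_decay=0.001)
net = max_pool_2d(net, 2)
net = fully_connected(net, 10, activation='softmax')  # fully_connected as above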