    if strides > 1:
        layer = _factorized_reduction(
            'gate_reduction', layer, out_filters, data_format)
    # this is important for computing hallucination statistics.
    pre_gate_layer = layer
    if operation == LayerTypes.GATED_LAYER:
        layer = finalized_gated_layer('gated_layer', layer)
    elif operation == LayerTypes.NO_FORWARD_LAYER:
        layer = candidate_gated_layer('gated_layer', layer)
    else:
        layer = finalized_gated_layer('anti_gated_layer', layer, init_val=1.0)
    layer.pre_gate_layer = pre_gate_layer
    return layer
elif operation == LayerTypes.FullyConnected:
    layer = FullyConnected('fully_connect', layer, out_filters)
    layer = tf.nn.relu(layer, 'relu')
    return layer
elif operation in [
        LayerTypes.FC_TANH_MUL_GATE, LayerTypes.FC_SGMD_MUL_GATE,
        LayerTypes.FC_RELU_MUL_GATE, LayerTypes.FC_IDEN_MUL_GATE]:
    ht = FullyConnected(
        'fully_connect', layer, 2 * out_filters,
        activation=tf.identity, use_bias=False)
    ch_dim = 1
    h, t = tf.split(ht, 2, axis=ch_dim)
    t = tf.sigmoid(t)
    if operation == LayerTypes.FC_TANH_MUL_GATE:
        h = tf.tanh(h)
    elif operation == LayerTypes.FC_SGMD_MUL_GATE:
        h = tf.sigmoid(h)
    elif operation == LayerTypes.FC_RELU_MUL_GATE:
        h = tf.nn.relu(h)
    elif operation == LayerTypes.FC_IDEN_MUL_GATE:
        h = tf.identity(h)
    # add residual
    if _get_dim(layer, ch_dim) != out_filters:
        layer = FullyConnected(
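
# A minimal, self-contained sketch of what the FC_*_MUL_GATE branch above sets
# up: one dense layer emits both a transform h and a gate t, the gate is
# squashed with a sigmoid, and the two are blended with the (possibly
# projected) input. The snippet is truncated before the halves are recombined,
# so the highway-style blend h * t + x * (1 - t) below is an assumption, not
# the verbatim continuation; all names here are illustrative only.
import tensorflow as tf

def highway_gate_sketch(x, out_filters):
    ht = tf.keras.layers.Dense(2 * out_filters, use_bias=False)(x)
    h, t = tf.split(ht, 2, axis=1)      # channel dim is 1 for [N, C] inputs
    t = tf.sigmoid(t)                   # gate in (0, 1)
    h = tf.tanh(h)                      # the FC_TANH_MUL_GATE variant
    if x.shape[1] != out_filters:       # project the shortcut if widths differ
        x = tf.keras.layers.Dense(out_filters, use_bias=False)(x)
    return h * t + x * (1.0 - t)

print(highway_gate_sketch(tf.random.normal([4, 32]), out_filters=64).shape)  # (4, 64)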
def projection_layer(name, layer, out_filters, ch_dim, id_mask_slice=None):
    with tf.variable_scope(name):
        n_dim = len(layer.get_shape().as_list())
        if n_dim == 4:
            layer = tf.nn.relu(layer)
            layer = Conv2D('conv1x1_proj', layer, out_filters, 1, strides=1, activation=tf.identity)
            layer = BatchNorm('bn_proj', layer)
        elif n_dim == 2:
            layer = tf.nn.relu(layer)
            layer = FullyConnected('fc_proj', layer, out_filters, activation=tf.identity)
        else:
            raise ValueError("Projection cannot handle tensor of dim {}".format(n_dim))
        return layer
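
# For intuition, a rough eager-TF2 equivalent of projection_layer: rank-4
# feature maps get a 1x1 conv projection and rank-2 features a dense
# projection, so that a residual add can match channel counts. The Keras
# layers (and the omission of batch norm) are assumptions for brevity, not
# the tensorpack originals.
import tensorflow as tf

def projection_sketch(x, out_filters):
    x = tf.nn.relu(x)
    if x.shape.rank == 4:            # NHWC feature map in this sketch
        return tf.keras.layers.Conv2D(out_filters, 1)(x)
    if x.shape.rank == 2:            # flat [N, C] feature
        return tf.keras.layers.Dense(out_filters)(x)
    raise ValueError("Projection cannot handle tensor of rank {}".format(x.shape.rank))

print(projection_sketch(tf.random.normal([2, 8, 8, 16]), 32).shape)  # (2, 8, 8, 32)
print(projection_sketch(tf.random.normal([2, 16]), 32).shape)        # (2, 32)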
for si in range(n_scales):
    for li in range(n_layers_per_scale):
        name = 'layer{:03d}'.format(si * n_layers_per_scale + li)
        strides = 1
        if li == 0 and si > 0:
            strides = 2
            out_filters *= 2
        with tf.variable_scope(name):
            l = residual_bottleneck_layer('res_btl', l, out_filters, strides, data_format)
# only use the last output for predicting the child model accuracy
l = GlobalAvgPooling('gap', l)
pve = tf.reshape(pve, [-1, 1])
h_stats = [tf.reshape(hs, [-1, 1]) for hs in h_stats]
l = tf.concat(values=[pve, l] + h_stats, axis=ch_dim)
pred = FullyConnected('fully_connect', l, 1, activation=tf.sigmoid)
pred = tf.reshape(pred, [-1])
self.pred = tf.identity(pred, name='predicted_accuracy')
cost = tf.losses.mean_squared_error(target, self.pred)
self.cost = tf.identity(cost, name='cost')
add_moving_summary(self.cost)
return self.cost
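
# A toy, eager-TF2 sketch of the output path above: pooled features are
# concatenated with a per-sample scalar (pve) and the per-layer statistics
# (h_stats), squashed to (0, 1) with a sigmoid, and regressed against the
# measured child-model accuracy with an MSE loss. All shapes and the random
# inputs are illustrative assumptions.
import tensorflow as tf

batch = 4
pooled = tf.random.normal([batch, 128])             # stand-in for GlobalAvgPooling output
pve = tf.random.uniform([batch, 1])                 # per-sample scalar from the snippet
h_stats = [tf.random.normal([batch, 1]) for _ in range(3)]

features = tf.concat([pve, pooled] + h_stats, axis=1)
pred = tf.keras.layers.Dense(1, activation='sigmoid')(features)
pred = tf.reshape(pred, [-1])                       # predicted accuracy in (0, 1)

target = tf.random.uniform([batch])                 # measured accuracy
cost = tf.reduce_mean(tf.square(target - pred))     # same quantity as mean_squared_error
print(float(cost))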
        num_convs (int): number of conv layers
        norm (str or None): either None or 'GN'

    Returns:
        2D head feature
    """
    assert norm in [None, 'GN'], norm
    l = feature
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out', distribution='normal')):
        for k in range(num_convs):
            l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
            if norm is not None:
                l = GroupNorm('gn{}'.format(k), l)
        l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
                           kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu)
    return l
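
# The head above optionally applies GroupNorm after every conv when
# norm == 'GN'. A compact eager sketch of group normalization over a
# channels-first feature map, assuming 32 groups (a common default) and
# omitting the learnable gamma/beta for brevity.
import tensorflow as tf

def group_norm_sketch(x, groups=32, eps=1e-5):
    n, c, h, w = x.shape
    x = tf.reshape(x, [n, groups, c // groups, h, w])
    mean, var = tf.nn.moments(x, axes=[2, 3, 4], keepdims=True)
    x = (x - mean) / tf.sqrt(var + eps)
    return tf.reshape(x, [n, c, h, w])

print(group_norm_sketch(tf.random.normal([2, 256, 7, 7])).shape)  # (2, 256, 7, 7)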
def fastrcnn_2fc_head(feature):
    """
    Args:
        feature (any shape):

    Returns:
        2D head feature
    """
    dim = cfg.FPN.FRCNN_FC_HEAD_DIM
    init = tf.variance_scaling_initializer()
    hidden = FullyConnected('fc6', feature, dim, kernel_initializer=init, activation=tf.nn.relu)
    hidden = FullyConnected('fc7', hidden, dim, kernel_initializer=init, activation=tf.nn.relu)
    return hidden
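
# A minimal eager sketch of the same two-FC box head: the ROI feature is
# flattened and pushed through two ReLU dense layers. The hidden width of
# 1024 stands in for cfg.FPN.FRCNN_FC_HEAD_DIM and is an assumption.
import tensorflow as tf

def two_fc_head_sketch(roi_feature, dim=1024):
    init = tf.keras.initializers.VarianceScaling()
    x = tf.keras.layers.Flatten()(roi_feature)      # any shape -> [N, features]
    x = tf.keras.layers.Dense(dim, activation='relu', kernel_initializer=init)(x)
    x = tf.keras.layers.Dense(dim, activation='relu', kernel_initializer=init)(x)
    return x

# e.g. 7x7 ROI-aligned features with 256 channels
print(two_fc_head_sketch(tf.random.normal([8, 256, 7, 7])).shape)  # (8, 1024)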
"""
Fully connected layer for the class and box branch
Args:
feature map: The roi feature map, Num_boxes x Num_channels x H_roi x W_roi
Returns:
2D head feature: Num_boxes x Num_features
"""
dim = cfg.FPN.BOXCLASS_FC_HEAD_DIM
if fp16:
feature = tf.cast(feature, tf.float16)
with mixed_precision_scope(mixed=fp16):
init = tf.variance_scaling_initializer(dtype=tf.float16 if fp16 else tf.float32, seed=seed_gen.next())
hidden = FullyConnected('fc6', feature, dim, kernel_initializer=init, activation=tf.nn.relu)
hidden = FullyConnected('fc7', hidden, dim, kernel_initializer=init, activation=tf.nn.relu)
if fp16:
hidden = tf.cast(hidden, tf.float32)
return hidden
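
# The fp16 variant above follows a cast-in / cast-out pattern: inputs are cast
# to float16, the dense layers run in half precision, and the result is cast
# back to float32 so downstream losses stay in full precision. A bare-bones
# sketch of that pattern without the custom mixed_precision_scope; the layer
# choices and hidden width are assumptions.
import tensorflow as tf

def fp16_head_sketch(feature, dim=1024, fp16=True):
    compute_dtype = tf.float16 if fp16 else tf.float32
    if fp16:
        feature = tf.cast(feature, tf.float16)
    x = tf.reshape(feature, [feature.shape[0], -1])
    x = tf.keras.layers.Dense(dim, activation='relu', dtype=compute_dtype)(x)
    x = tf.keras.layers.Dense(dim, activation='relu', dtype=compute_dtype)(x)
    if fp16:
        x = tf.cast(x, tf.float32)
    return x

print(fp16_head_sketch(tf.random.normal([2, 256, 7, 7])).dtype)  # <dtype: 'float32'>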
        norm (str or None): either None or 'GN'

    Returns:
        2D head feature
    """
    assert norm in [None, 'GN'], norm
    l = feature
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out',
                      distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
        for k in range(num_convs):
            l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
            if norm is not None:
                l = GroupNorm('gn{}'.format(k), l)
        l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
                           kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu)
    return l
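
# The convolutions above use variance scaling with scale=2.0 and
# mode='fan_out' (He/MSRA-style initialization); on newer TF versions the
# untruncated normal is used so the sampled std is not clipped. A quick check
# of the resulting weight statistics for an assumed 3x3, 256-channel kernel:
import numpy as np
import tensorflow as tf

init = tf.keras.initializers.VarianceScaling(
    scale=2.0, mode='fan_out', distribution='untruncated_normal')
w = init(shape=[3, 3, 256, 256]).numpy()        # HWIO kernel
fan_out = 3 * 3 * 256
print(np.isclose(w.std(), np.sqrt(2.0 / fan_out), rtol=0.05))  # True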