        # Two offset average-pooling paths, each projected to filters // 2
        # channels, are concatenated and batch-normalized.
        p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                    name='adjust_conv_1_%s' % id, kernel_initializer='he_normal')(p1)
        # Shift the second path by one pixel so the two paths sample
        # complementary spatial positions.
        p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
        p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
        p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_2_%s' % id)(p2)
        p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                    name='adjust_conv_2_%s' % id, kernel_initializer='he_normal')(p2)
        p = concatenate([p1, p2], axis=channel_dim)
        p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name='adjust_bn_%s' % id)(p)
elif p._keras_shape[channel_dim] != filters:
    # Channel mismatch only: a 1x1 projection brings p to the right width.
    with K.name_scope('adjust_projection_block_%s' % id):
        p = Activation('relu')(p)
        p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='adjust_conv_projection_%s' % id,
                   use_bias=False, kernel_regularizer=l2(weight_decay), kernel_initializer='he_normal')(p)
        p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name='adjust_bn_%s' % id)(p)
return p
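# The fragment above assumes module-level BN constants and a channel-axis
# helper from the surrounding NASNet implementation. A minimal sketch of that
# assumed context (values are common NASNet defaults, not taken from the
# snippet itself):
from keras import backend as K

_BN_DECAY = 0.9997
_BN_EPSILON = 1e-3
channel_dim = 1 if K.image_data_format() == 'channels_first' else -1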
N, C, D = 2, 3, 3
x = create_tensor(N, C)
sub_input1 = Input(shape=(C,))
sub_mapped1 = Dense(D)(sub_input1)
sub_output1 = Activation('sigmoid')(sub_mapped1)
sub_model1 = Model(inputs=sub_input1, outputs=sub_output1)
sub_input2 = Input(shape=(C,))
sub_mapped2 = sub_model1(sub_input2)
sub_output2 = Activation('tanh')(sub_mapped2)
sub_model2 = Model(inputs=sub_input2, outputs=sub_output2)
input1 = Input(shape=(D,))
input2 = Input(shape=(D,))
mapped1_1 = Activation('tanh')(input1)
mapped2_1 = Activation('sigmoid')(input2)
mapped1_2 = sub_model1(mapped1_1)
mapped1_3 = sub_model1(mapped1_2)
mapped2_2 = sub_model2(mapped2_1)
sub_sum = Add()([mapped1_3, mapped2_2])
model = Model(inputs=[input1, input2], outputs=sub_sum)
# coremltools can't convert this kind of model.
self._test_one_to_one_operator_keras(model, [x, 2 * x])
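# `create_tensor` is not defined in the snippet; a plausible stand-in that
# matches its usage above (a random batch of shape (N, C)) could be:
import numpy as np

def create_tensor(N, C):
    # Hypothetical helper: random float32 input batch for the Keras model.
    return np.random.rand(N, C).astype(np.float32)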
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                   strides=anchor_stride,
                   name='rpn_conv_shared')(feature_map)

# Anchor scores. [batch, height, width, anchors per location * 2]
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
              activation='linear', name='rpn_class_raw')(shared)

# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
    lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)

# Softmax on the last dimension (BG/FG)
rpn_probs = KL.Activation(
    "softmax", name="rpn_class_xxx")(rpn_class_logits)

# Bounding box refinement. [batch, H, W, anchors per location * depth]
# where depth is [x, y, log(w), log(h)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
              activation='linear', name='rpn_bbox_pred')(shared)

# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)

return [rpn_class_logits, rpn_probs, rpn_bbox]
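# In Mask R-CNN-style code, a graph function like the one above is usually
# wrapped into a Keras Model so the same RPN weights can be shared across all
# feature-pyramid levels. A minimal sketch, assuming KM aliases keras.models
# and rpn_graph is the enclosing function of the fragment above:
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    input_feature_map = KL.Input(shape=[None, None, depth],
                                 name='input_rpn_feature_map')
    outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
    return KM.Model([input_feature_map], outputs, name='rpn_model')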
def build_discriminator(self):
    # The original snippet begins mid-function; the image input below is an
    # assumed reconstruction based on how x_inputs is used.
    x_inputs = Input(shape=self.input_shape)
    c_inputs = Input(shape=(self.num_attrs,))
    x = BasicConvLayer(filters=64, kernel_size=(5, 5), bn=True)(x_inputs)
    x = BasicConvLayer(filters=128, kernel_size=(5, 5), strides=(2, 2), bn=True)(x)
    x = BasicConvLayer(filters=256, kernel_size=(5, 5), strides=(2, 2), bn=True)(x)
    x = BasicConvLayer(filters=512, kernel_size=(5, 5), strides=(2, 2), bn=True)(x)
    x = Flatten()(x)
    # Condition the discriminator on the attribute vector
    x = Concatenate(axis=-1)([x, c_inputs])
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    return Model([x_inputs, c_inputs], x, name='discriminator')
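# Hypothetical smoke test for the discriminator above, assuming
# self.input_shape == (64, 64, 3) and self.num_attrs == 10:
import numpy as np

discriminator = self.build_discriminator()
x_batch = np.zeros((8, 64, 64, 3), dtype=np.float32)
c_batch = np.zeros((8, 10), dtype=np.float32)
p_real = discriminator.predict([x_batch, c_batch])   # -> shape (8, 1)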
model = Sequential()
# Input shape assumes channels-first image data format
model.add(Conv2D(nb_filters, (nb_conv, nb_conv),
                 padding='valid',
                 input_shape=(img_channels, img_rows, img_cols)))
convout1 = Activation('relu')
model.add(convout1)
model.add(Conv2D(nb_filters, (nb_conv, nb_conv)))
convout2 = Activation('relu')
model.add(convout2)
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
# Model summary
model.summary()
# Model config details
model.get_config()
if wf_index >= 0:
    # Load pretrained weights
    fname = WeightFileName[int(wf_index)]
    print("loading ", fname)
    model.load_weights(fname)
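# A hypothetical training call for the compiled model above, assuming X_train
# has shape (n, img_channels, img_rows, img_cols) and Y_train is one-hot
# encoded with nb_classes columns:
model.fit(X_train, Y_train, batch_size=32, epochs=10,
          validation_split=0.1, verbose=1)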
def build_generator(self):
    z_inputs = Input(shape=(self.z_dims,))
    c_inputs = Input(shape=(self.num_attrs,))
    x = Concatenate(axis=-1)([z_inputs, c_inputs])
    x = Dense(4 * 4 * 512)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((4, 4, 512))(x)
    x = BasicDeconvLayer(filters=512, kernel_size=(5, 5), upsample=True, bn=True)(x)
    x = BasicDeconvLayer(filters=256, kernel_size=(5, 5), upsample=True, bn=True)(x)
    x = BasicDeconvLayer(filters=256, kernel_size=(5, 5), upsample=True, bn=True)(x)
    x = BasicDeconvLayer(filters=128, kernel_size=(5, 5), upsample=True, bn=True)(x)
    x = BasicDeconvLayer(filters=3, kernel_size=(3, 3), upsample=False, activation='tanh')(x)
    return Model([z_inputs, c_inputs], x, name='generator')
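# Hypothetical smoke test for the generator above, assuming z_dims == 100 and
# num_attrs == 10; the four upsampling deconv blocks take the 4x4 seed to
# 64x64:
import numpy as np

generator = self.build_generator()
z = np.random.normal(size=(4, 100)).astype(np.float32)
c = np.zeros((4, 10), dtype=np.float32)
fake_images = generator.predict([z, c])   # -> shape (4, 64, 64, 3)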
def d_layer(layer_input, filters, strides_=2, f_size=3, bn=True):
    """Discriminator layer: strided convolution, activation, optional BN."""
    d = Conv2D(filters, kernel_size=f_size, strides=strides_, padding='same')(layer_input)
    # d = LeakyReLU(alpha=0.2)(d)
    d = Activation('relu')(d)
    if bn:
        d = BatchNormalization(momentum=0.8)(d)
    return d
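# Sketch of how d_layer blocks are commonly stacked into a PatchGAN-style
# discriminator (input size and filter counts are illustrative only):
img = Input(shape=(128, 128, 3))
d = d_layer(img, 64, bn=False)   # BN is often skipped on the first block
d = d_layer(d, 128)
d = d_layer(d, 256)
d = d_layer(d, 512)
validity = Conv2D(1, kernel_size=3, strides=1, padding='same')(d)
patch_discriminator = Model(img, validity)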
x = LSTM(128)(ip)
x = Dropout(0.8)(x)
y = Permute((2, 1))(ip)
y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
y = BatchNormalization()(y)
y = Activation('relu')(y)
y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
y = BatchNormalization()(y)
y = Activation('relu')(y)
y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
y = BatchNormalization()(y)
y = Activation('relu')(y)
y = GlobalAveragePooling1D()(y)
x = concatenate([x, y])
out = Dense(NB_CLASS, activation='softmax')(x)
model = Model(ip, out)
model.summary()
# add load model code here to fine-tune
return model
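# The snippet stops before compilation; a minimal sketch of the usual next
# steps (optimizer choice and checkpoint path are assumptions):
# model.load_weights('pretrained_weights.h5')   # optional fine-tuning start
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])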
# conv_name_base is reconstructed here to mirror relu_name_base; the original
# snippet begins mid-function and its definition was cut off.
conv_name_base = 'conv' + str(stage) + '_' + str(branch)
relu_name_base = 'relu' + str(stage) + '_' + str(branch)

# 1x1 convolution (bottleneck layer)
inter_channel = nb_filter * 4
x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_x1_bn')(x)
x = Scale(axis=3, name=conv_name_base+'_x1_scale')(x)
x = Activation('relu', name=relu_name_base+'_x1')(x)
x = Convolution2D(inter_channel, (1, 1), name=conv_name_base+'_x1', use_bias=False)(x)
if dropout_rate:
    x = Dropout(dropout_rate)(x)

# 3x3 convolution
x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_x2_bn')(x)
x = Scale(axis=3, name=conv_name_base+'_x2_scale')(x)
x = Activation('relu', name=relu_name_base+'_x2')(x)
x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
x = Convolution2D(nb_filter, (3, 3), name=conv_name_base+'_x2', use_bias=False)(x)
if dropout_rate:
    x = Dropout(dropout_rate)(x)

return x
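# In DenseNet-style code, a bottleneck block like the one above is applied
# repeatedly inside a dense block, with every output concatenated back onto
# the running feature map. A minimal sketch, assuming the fragment is the body
# of conv_block(x, stage, branch, nb_filter, dropout_rate=None):
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None):
    concat_feat = x
    for i in range(nb_layers):
        branch = i + 1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate)
        # DenseNet connectivity: append each block's output to the features
        concat_feat = concatenate([concat_feat, x], axis=3)
        nb_filter += growth_rate
    return concat_feat, nb_filter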
# (The snippet resumes mid-function: the preceding layers that define
# `outputs` and the shared `options` dict are truncated in the source.)
outputs = keras.layers.Conv2D(
    filters=num_classes * num_anchors,
    kernel_initializer=keras.initializers.normal(mean=0.0, stddev=0.01, seed=None),
    bias_initializer=initializers.PriorProbability(probability=prior_probability),
    name='pyramid_classification',
    **options
)(outputs)

# Reshape output and apply sigmoid
if keras.backend.image_data_format() == 'channels_first':
    outputs = keras.layers.Permute((2, 3, 1), name='pyramid_classification_permute')(outputs)
outputs = keras.layers.Reshape((-1, num_classes), name='pyramid_classification_reshape')(outputs)
outputs = keras.layers.Activation('sigmoid', name='pyramid_classification_sigmoid')(outputs)

return keras.models.Model(inputs=inputs, outputs=outputs, name=name)
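# The **options mapping expanded above typically carries the Conv2D
# hyperparameters shared by every layer of the classification head; an
# illustrative sketch (values are common RetinaNet defaults, not taken from
# this snippet):
options = {
    'kernel_size': 3,
    'strides': 1,
    'padding': 'same',
}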