# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Simple CNN classifier: conv -> relu -> conv -> relu -> max-pool -> dense -> softmax,
# trained with categorical cross-entropy and evaluated on the held-out test set.
# NOTE(review): `Convolution2D`/`border_mode` is the deprecated Keras 1 API;
# modernized to `Conv2D`/`padding` to match the Conv2D usage elsewhere in this file.
model = Sequential()
model.add(Conv2D(nb_filters, (kernel_size[0], kernel_size[1]),
                 padding='valid',
                 input_shape=(img_rows, img_cols, 3)))
model.add(Activation('relu'))
model.add(Conv2D(nb_filters, (kernel_size[0], kernel_size[1])))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))
# Evaluate quietly, then report loss and accuracy.
score = model.evaluate(X_test, Y_test, verbose=0)
ypred = model.predict(X_test)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# -- Fragment: tail of a GAN discriminator builder (the earlier layers and
#    the enclosing function's `def` are outside this view). Strided Conv2D
#    downsampling stack -> flatten -> single sigmoid real/fake score.
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
# Asymmetric padding: one extra row/column on the bottom/right only.
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
# Binary output: probability that the input image is real.
model.add(Dense(1, activation='sigmoid'))
model.summary()
# Wrap the Sequential stack as a functional Model mapping img -> validity.
# `img_shape` comes from the enclosing scope — not visible here.
img = Input(shape=img_shape)
validity = model(img)
return Model(img, validity)
# -- Fragment: functional-API graph that reuses a single Conv2D layer twice
#    (shared weights) inside a sub-model, then composes a Model and a
#    Sequential as layers of a larger network. `hidden_1` and `nn_input`
#    come from the enclosing scope — not visible here.
submodel_input = Input(shape=(26, 26, 8))
submodel_conv = Conv2D(8, kernel_size=(3, 3), activation="relu")
submodel_1 = submodel_conv(submodel_input)
submodel_2 = submodel_conv(submodel_1) # use same layer multiple times
submodel_3 = Conv2D(16, kernel_size=(3, 3), activation="relu")(submodel_1)
# Two outputs, returned in declaration order: [submodel_3, submodel_2].
submodel = Model(inputs=[submodel_input], outputs=[submodel_3, submodel_2])
subseq = Sequential()
subseq.add(Conv2D(16, kernel_size=(3, 3), activation="relu", input_shape=(22, 22, 16)))
subseq.add(Flatten())
subseq.add(Dense(10))
# Applying the two-output sub-model yields two tensors.
hidden_2, hidden_3 = submodel(hidden_1)
hidden_4 = subseq(hidden_2)
hidden_5 = Flatten()(hidden_3)
hidden_6 = Dense(10)(hidden_5)
# Merge the two 10-unit branches elementwise, then softmax over classes.
hidden_sum = add([hidden_4, hidden_6])
nn_output = Activation(activation="softmax")(hidden_sum)
model = Model(inputs=[nn_input], outputs=[nn_output])
# -- Fragment: tail of a model-factory function; the matching `if`/`elif`
#    branches that select the model type are outside this view.
else:
raise NotImplementedError("Unknown model type")
print(f"input shape: {input_shape}, data_format: {K.image_data_format()}")
return model
# -- TextCNN (Kim-2014 style): embedding layer feeding parallel Conv1D
#    branches with filter sizes 3/4/5, concatenated and fed to a small MLP.
# NOTE(review): the local `input` was renamed so it no longer shadows the
# `input` builtin; behavior is unchanged.
doc_input = Input(shape=[max_document_length])
# Embedding layer: initialized from pre-trained word2vec vectors; pass
# trainable=False to keep the pre-trained weights frozen.
x = Embedding(max_features + 1,
              embedding_dims,
              weights=[embedding_matrix],
              trainable=trainable)(doc_input)
# One convolution branch per n-gram size.
convs = []
for filter_size in [3, 4, 5]:
    l_conv = Conv1D(filters=filters, kernel_size=filter_size, activation='relu')(x)
    l_pool = MaxPooling1D()(l_conv)
    l_pool = Flatten()(l_pool)
    convs.append(l_pool)
merge = concatenate(convs, axis=1)
out = Dropout(0.2)(merge)
output = Dense(32, activation='relu')(out)
# Output layer: 2-way softmax (binary classification with one-hot targets).
output = Dense(units=2, activation='softmax')(output)
model = Model([doc_input], output)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
def _encoder_model(self, input_shape, hyperparameters):
    """Build the encoder: frozen SqueezeNet features -> dense embedding.

    Args:
        input_shape: unused here — the SqueezeNet input shape is derived from
            ``self.input_shape`` instead (parameter kept for interface
            compatibility with callers).
        hyperparameters: mapping with at least ``'embedding_dim'``; its
            product sets the embedding width.

    Returns:
        A Keras ``Model`` mapping images to a flat ReLU embedding.
    """
    squeezenet = SqueezeNet(
        input_shape=(self.input_shape[0], self.input_shape[1], 3),
        include_top=False,
    )
    x = Flatten()(squeezenet.output)
    # np.prod lets embedding_dim be a shape tuple as well as a scalar.
    embedding = Dense(np.prod(hyperparameters['embedding_dim']), activation='relu')(x)
    encoder = Model(squeezenet.input, embedding)
    # Freeze the backbone so only layers added on top of it are trained.
    utils.freeze_layers(squeezenet)
    return encoder
def out_block(input_tensor, nb_classes):
    """Fully connected classification head: FC(1024) -> FC(256) -> softmax.

    Each hidden Dense layer is followed by ReLU and then BatchNormalization
    (momentum=0.66), preserving the original layer order.

    Args:
        input_tensor: feature tensor from the convolutional trunk.
        nb_classes: number of output classes.

    Returns:
        Softmax output tensor of width ``nb_classes``.
    """
    x = Flatten()(input_tensor)
    x = Dense(1024)(x)
    x = relu()(x)  # `relu` is a project-local activation-layer factory
    x = BatchNormalization(momentum=0.66)(x)
    x = Dense(256)(x)
    x = relu()(x)
    x = BatchNormalization(momentum=0.66)(x)
    x = Dense(nb_classes)(x)
    x = Activation('softmax')(x)
    return x
def encode_shared(input):
    """Shared VAE encoder tail: conv downsampling -> latent sample.

    Args:
        input: feature tensor to encode. (NOTE(review): the parameter name
            shadows the ``input`` builtin; kept to preserve the external
            keyword-call interface.)

    Returns:
        Tuple ``(z, z_mean, z_log_var)`` where ``z`` is drawn via the
        reparameterization trick using the project-local ``sampling`` Lambda.
    """
    conv_1 = LeakyReluConv2D(filters=256, kernel_size=3, strides=1, padding='same')(input)
    conv_2 = LeakyReluConv2D(filters=256, kernel_size=3, strides=2, padding='same')(conv_1)
    flat = Flatten()(conv_2)
    # Latent distribution parameters, 512-dimensional.
    z_mean = Dense(512)(flat)
    z_log_var = Dense(512)(flat)
    z = Lambda(sampling, output_shape=(512,))([z_mean, z_log_var])
    return z, z_mean, z_log_var
def create_model(self):
    """Build a simple embedding-based recommender model.

    User and item ids are embedded, flattened, concatenated with the raw
    meta feature, and passed through a small MLP to predict a scalar rating.

    Returns:
        Compiled Keras ``Model`` with inputs [user, item, meta_item] and a
        single regression output (MAE loss, Adam optimizer).
    """
    user_id_input = Input(shape=[1], name='user')
    item_id_input = Input(shape=[1], name='item')
    meta_input = Input(shape=[1], name='meta_item')
    # +1 so the maximum observed id is a valid row in the embedding matrix.
    user_embedding = Embedding(output_dim=EMBEDDING_SIZE, input_dim=self.max_user_id + 1,
                               input_length=1, name='user_embedding')(user_id_input)
    item_embedding = Embedding(output_dim=EMBEDDING_SIZE, input_dim=self.max_item_id + 1,
                               input_length=1, name='item_embedding')(item_id_input)
    # Reshape from (batch_size, input_length, embedding_size) to
    # (batch_size, input_length * embedding_size), which equals
    # (batch_size, embedding_size) because input_length == 1.
    user_vecs = Flatten()(user_embedding)
    item_vecs = Flatten()(item_embedding)
    input_vecs = concatenate([user_vecs, item_vecs, meta_input])
    input_vecs = Dropout(0.5)(input_vecs)
    x = Dense(64, activation='relu')(input_vecs)
    y = Dense(1)(x)
    model = Model(inputs=[user_id_input, item_id_input, meta_input], outputs=[y])
    model.compile(optimizer='adam', loss='mae')
    return model
base_model = Model(inputs=base_model.get_input_at(0), outputs=[base_model.get_output_at(0)], name='resnet50')
# base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=Input(shape=(224, 224, 3)))
# base_model = Model(inputs=[base_model.input], outputs=[base_model.output], name='resnet50')
# for layer in base_model.layers[:len(base_model.layers)/2]:
# layer.trainable = False
for layer in base_model.layers:
if isinstance(layer, BatchNormalization):
layer.trainable = False
print 'to layer: %d' % (len(base_model.layers)/3*2)
img0 = Input(shape=(224, 224, 3), name='img_0')
img1 = Input(shape=(224, 224, 3), name='img_1')
img2 = Input(shape=(224, 224, 3), name='img_2')
feature0 = Flatten()(base_model(img0))
feature1 = Flatten()(base_model(img1))
feature2 = Flatten()(base_model(img2))
dis1 = Lambda(eucl_dist, name='square1')([feature0, feature1])
dis2 = Lambda(eucl_dist, name='square2')([feature0, feature2])
score1 = Dense(1, activation='sigmoid', name='score1')(dis1)
score2 = Dense(1, activation='sigmoid', name='score2')(dis2)
sub_score = Lambda(sub, name='sub_score')([score1, score2])
model = Model(inputs=[img0, img1, img2], outputs=[sub_score])
# model = Model(inputs=[img0, img1, img2], outputs=[sub_score])
model.get_layer('score1').set_weights(pair_model.get_layer('bin_out').get_weights())
model.get_layer('score2').set_weights(pair_model.get_layer('bin_out').get_weights())
plot_model(model, to_file='rank_model.png')
print(model.summary())
return model
# -- Fragment: tail of a ResNet-style bottleneck block followed by the head
#    of a model that mixes per-variant annotation features into the conv
#    features. `r`, `x`, `args`, `concat_axis` and the hyperparameter flags
#    come from the enclosing scope — not visible here.
y = BatchNormalization(axis=concat_axis)(y)
y = Activation('relu')(y)
y = Conv2D(r.filters[2], (1, 1))(y)
y = BatchNormalization(axis=concat_axis)(y)
if r.identity:
# Identity shortcut: input added to the residual path unchanged.
x = layers.add([y, x])
else:
# Projection shortcut: 1x1 conv matches channels/stride before the add.
shortcut = Conv2D(r.filters[2], (1, 1), strides=r.strides)(x)
shortcut = BatchNormalization(axis=concat_axis)(shortcut)
x = layers.add([y, shortcut])
x = Activation('relu')(x)
x = Flatten()(x)
# Mix the variant annotations in
annotations = annotations_in = Input(shape=(len(args.annotations),), name=args.annotation_set)
if annotation_batch_normalize:
annotations_in = BatchNormalization(axis=-1)(annotations)
annotations_mlp = Dense(units=annotation_units, kernel_initializer=fc_initializer, activation='relu')(annotations_in)
x = layers.concatenate([x, annotations_mlp], axis=concat_axis)
# Fully connected layers
for fc_units in fc_layers:
if fc_batch_normalize:
# Linear Dense then BN then ReLU (batch-norm between affine and activation).
x = Dense(units=fc_units, kernel_initializer=fc_initializer, activation='linear')(x)
x = BatchNormalization(axis=1)(x)
x = Activation('relu')(x)