import numpy as np
import keras.layers as L
from keras import models, optimizers

# Toy data: 25-D standard-normal samples, labelled by whether the squared norm exceeds 25
X = np.random.standard_normal((256 * 400, 25))
y = np.uint8(np.sum(X ** 2, axis=-1) > 25.)
num_train = 256 * 360
X_train = X[:num_train, :]
y_train = y[:num_train]
X_test = X[num_train:, :]
y_test = y[num_train:]
input_x = L.Input(shape=(25,))
z1 = L.Dense(40, name="hsic_dense_1", activation="relu")(input_x)
z2 = L.Dense(64, name="hsic_dense_2", activation="relu")(z1)
z2 = L.Dropout(0.2)(z2)
z3 = L.Dense(32, name="hsic_dense_3", activation="relu")(z2)
output_x = L.Dense(1, name="output_layer", activation="sigmoid")(z3)
model = models.Model(inputs=input_x, outputs=output_x)
model.compile(optimizers.SGD(0.001),
              loss="binary_crossentropy",
              metrics=["acc"])
# HSICBottleneckTrained and PostTrained come from the HSIC-bottleneck implementation
# this snippet accompanies: the hidden layers are first trained with the HSIC
# objective, then only the output layer is fine-tuned ("post-training").
model = HSICBottleneckTrained(model, batch_size=256, lambda_0=100., sigma=10.)()
model.fit(X_train, y_train, epochs=50, validation_data=(X_test, y_test), batch_size=256)
model = PostTrained(model)()
model.compile(optimizers.SGD(0.1),
              loss="binary_crossentropy",
              metrics=["acc"])
from keras.layers import Activation, AveragePooling2D, BatchNormalization, Conv2D, Dropout, ZeroPadding2D
from keras.regularizers import l2

def _transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    # BN -> ReLU -> 1x1 conv, then pool; pooltype selects the pooling geometry
    x = BatchNormalization(epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if pooltype == 2:
        # halve both spatial dimensions
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif pooltype == 1:
        # halve the height only; width is walked at stride 1 with one column of padding
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif pooltype == 3:
        # halve the height only, without padding
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
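# A minimal usage sketch for the transition block above (shapes are illustrative).
# pooltype=1 halves the feature-map height but keeps stride 1 horizontally, which
# suits wide inputs such as text lines.
from keras.layers import Input

feature_map = Input(shape=(32, 280, 64))  # (height, width, channels), illustrative
x, nb_filter = _transition_block(feature_map, 128, dropout_rate=0.2, pooltype=1)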
# (fragment of the PoseNet/GoogLeNet graph; the earlier inception blocks and the
# `input` tensor are defined above in the original script)
inception_5b_5x5 = Conv2D(128, (5, 5), padding='same', activation='relu', name='inception_5b/5x5',
                          kernel_regularizer=l2(0.0002))(inception_5b_5x5_reduce)
inception_5b_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same',
                                 name='inception_5b/pool')(inception_5a_output)
inception_5b_pool_proj = Conv2D(128, (1, 1), padding='same', activation='relu', name='inception_5b/pool_proj',
                                kernel_regularizer=l2(0.0002))(inception_5b_pool)
inception_5b_output = concatenate([inception_5b_1x1, inception_5b_3x3, inception_5b_5x5, inception_5b_pool_proj],
                                  axis=1, name='inception_5b/output')
pool5_7x7_s1 = AveragePooling2D(pool_size=(7, 7), strides=(1, 1), name='pool5/7x7_s1')(inception_5b_output)
loss3_flat = Flatten()(pool5_7x7_s1)
cls3_fc1_pose = Dense(2048, activation='relu', name='cls3_fc1_pose', kernel_regularizer=l2(0.0002),
                      kernel_initializer='normal')(loss3_flat)
cls3_fc1 = Dropout(0.5)(cls3_fc1_pose)
cls3_fc_pose_xyz = Dense(3, name='cls3_fc_pose_xyz', kernel_regularizer=l2(0.0002))(cls3_fc1)
cls3_fc_pose_wpqr = Dense(4, name='cls3_fc_pose_wpqr', kernel_regularizer=l2(0.0002))(cls3_fc1)
# pool5_drop_7x7_s1 = Dropout(0.4)(loss3_flat)
# loss3_classifier = Dense(1000, name='loss3/classifier', kernel_regularizer=l2(0.0002))(pool5_drop_7x7_s1)
# loss3_classifier_act = Activation('softmax', name='prob')(loss3_classifier)
# googlenet = Model(inputs=input, outputs=[loss1_classifier_act, loss2_classifier_act, loss3_classifier_act])
posenet = Model(inputs=input,
                outputs=[cls1_fc_pose_xyz, cls2_fc_pose_xyz, cls3_fc_pose_xyz,
                         cls1_fc_pose_wpqr, cls2_fc_pose_wpqr, cls3_fc_pose_wpqr])
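# PoseNet regresses a translation (xyz) and an orientation quaternion (wpqr) at each
# of the three heads. A hedged compile sketch: the quaternion losses are typically
# weighted far more heavily than the translation losses, but these exact weights are
# illustrative, not the authors' published settings.
posenet.compile(optimizer='adam',
                loss='mse',
                loss_weights=[0.3, 0.3, 1.0, 150., 150., 500.])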
# (the snippet starts mid-function; a plausible signature is restored here for context)
from keras.layers import Conv1D, Dense, Dropout, Embedding, GlobalMaxPool1D, Input
from keras.models import Model

def build_text_cnn(input_dim, input_length, vec_size, output_shape, output_type):
    '''
    :param output_shape: number of target classes; targets should be one-hot encoded
    :param output_type: last-layer type, 'multiple' (sigmoid, multi-label) or 'single' (softmax, single-label)
    :return: compiled Keras model
    '''
    data_input = Input(shape=[input_length])
    word_vec = Embedding(input_dim=input_dim + 1,
                         input_length=input_length,
                         output_dim=vec_size)(data_input)
    x = Conv1D(filters=128,
               kernel_size=[3],
               strides=1,
               padding='same',
               activation='relu')(word_vec)
    x = GlobalMaxPool1D()(x)
    x = Dense(500, activation='relu')(x)
    x = Dropout(0.1)(x)
    if output_type == 'multiple':
        # multi-label: independent sigmoids with binary cross-entropy
        x = Dense(output_shape, activation='sigmoid')(x)
        model = Model(inputs=data_input, outputs=x)
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['acc'])
    elif output_type == 'single':
        # single-label: softmax with categorical cross-entropy
        x = Dense(output_shape, activation='softmax')(x)
        model = Model(inputs=data_input, outputs=x)
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['acc'])
    else:
        raise ValueError('output_type should be multiple or single')
    return model
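# A minimal usage sketch for the builder above (vocabulary size, sequence length and
# class count are illustrative; X_tokens are padded token ids, y_onehot one-hot labels).
model = build_text_cnn(input_dim=20000, input_length=100, vec_size=128,
                       output_shape=5, output_type='single')
model.fit(X_tokens, y_onehot, epochs=3, batch_size=64)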
from keras.layers import Conv2D, Conv2DTranspose, Dropout, Input, MaxPooling2D, ReLU, concatenate
from keras.models import Model

def get_unet(do=0, activation=ReLU):
    inputs = Input((None, None, 3))
    # encoder: Conv -> activation -> Dropout blocks, downsampled by max pooling
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(inputs)))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv1)))
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(pool1)))
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv2)))
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(pool2)))
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv3)))
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(pool3)))
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv4)))
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(pool4)))
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(conv5)))
    # decoder: transposed convolutions with skip connections back to the encoder
    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(up6)))
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv6)))
    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(up7)))
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv7)))
    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(up8)))
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv8)))
    # the snippet was truncated here; the standard symmetric U-Net tail is assumed
    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(up9)))
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv9)))
    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
    return Model(inputs=inputs, outputs=conv10)
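# A usage sketch for get_unet (optimizer and loss are illustrative; binary
# cross-entropy matches the single-channel sigmoid head).
model = get_unet(do=0.1, activation=ReLU)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit(images, masks, epochs=10, batch_size=8)  # images: (N, H, W, 3), masks: (N, H, W, 1)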
y_train = y_train[allin]  # `allin` is an index array defined earlier in the original script
# indices of the negative (0) and positive (1) examples, e.g. for class balancing
all0 = np.asarray(np.where(y_train == 0)[0])
all1 = np.asarray(np.where(y_train == 1)[0])
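# A hedged sketch of what such index arrays are typically used for: undersampling the
# majority class so both classes contribute equally (the seed and the X_train name
# are illustrative).
rng = np.random.RandomState(0)
n_minority = min(len(all0), len(all1))
balanced = np.concatenate([rng.choice(all0, n_minority, replace=False),
                           rng.choice(all1, n_minority, replace=False)])
rng.shuffle(balanced)
X_balanced, y_balanced = X_train[balanced], y_train[balanced]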
# ==================== CLASSIFIER ========================
# (num_features, batch_norm, best_model_path, fold_number and the `adam` optimizer
# instance are defined earlier in the original script)
extracted_features = Input(shape=(num_features,),
                           dtype='float32', name='input')
if batch_norm:
    x = BatchNormalization(axis=-1, momentum=0.99,
                           epsilon=0.001)(extracted_features)
    x = Activation('relu')(x)
else:
    x = ELU(alpha=1.0)(extracted_features)

x = Dropout(0.9)(x)
x = Dense(4096, name='fc2', kernel_initializer='glorot_uniform')(x)
if batch_norm:
    x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
    x = Activation('relu')(x)
else:
    x = ELU(alpha=1.0)(x)
x = Dropout(0.8)(x)
x = Dense(1, name='predictions',
          kernel_initializer='glorot_uniform')(x)
x = Activation('sigmoid')(x)
classifier = Model(inputs=extracted_features,
                   outputs=x, name='classifier')

fold_best_model_path = best_model_path + 'urfd_fold_{}.h5'.format(fold_number)
classifier.compile(optimizer=adam, loss='binary_crossentropy',
                   metrics=['accuracy'])
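# The fold-specific path above is presumably used for checkpointing; a hedged sketch
# with Keras's ModelCheckpoint (the training-data names are illustrative).
from keras.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint(fold_best_model_path, monitor='val_loss', save_best_only=True)
classifier.fit(features_train, labels_train,
               validation_data=(features_val, labels_val),
               epochs=100, batch_size=64, callbacks=[checkpoint])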
from keras.layers import (Activation, BatchNormalization, Conv1D, Dense, Dropout,
                          GlobalAveragePooling1D, Input, LSTM, Permute, concatenate)
from keras.models import Model

def generate_model():
    ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))
    # recurrent branch
    x = LSTM(128)(ip)
    x = Dropout(0.8)(x)
    # convolutional branch: swap axes so Conv1D slides over the time dimension
    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = GlobalAveragePooling1D()(y)
    # the snippet was truncated here; the usual LSTM-FCN tail merges both branches
    out = concatenate([x, y])
    out = Dense(NB_CLASSES, activation='softmax')(out)  # NB_CLASSES assumed defined alongside MAX_SEQUENCE_LENGTH
    return Model(ip, out)
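# A hedged usage sketch for generate_model (the constants are illustrative).
MAX_SEQUENCE_LENGTH, NB_CLASSES = 640, 6
model = generate_model()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])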
from keras.layers import (Bidirectional, Dense, Dropout, Embedding, GRU,
                          GlobalMaxPool1D, Input, LSTM)
from keras.models import Model

def RNN_model(sentence_length, vocabulary_size, embedding_dim, n_classes,
              lstm_outdim=80, rnn_type='lstm', embedding_matrix=None):
    inputs = Input(shape=(sentence_length,), dtype='int32')
    if embedding_matrix is None:
        embedding = Embedding(vocabulary_size, embedding_dim)(inputs)
    else:
        # initialise from pre-trained vectors and keep them trainable
        embedding_layer = Embedding(vocabulary_size, embedding_dim, weights=[embedding_matrix],
                                    input_length=sentence_length, trainable=True)
        embedding = embedding_layer(inputs)
    if rnn_type.lower() == 'gru':
        rnn = Bidirectional(GRU(lstm_outdim, return_sequences=True))(embedding)
    elif rnn_type.lower() == 'lstm':
        rnn = Bidirectional(LSTM(lstm_outdim, return_sequences=True))(embedding)
    else:
        raise ValueError("invalid rnn_type input, use gru or lstm")
    pool = GlobalMaxPool1D()(rnn)
    dropout1 = Dropout(0.5)(pool)
    outputs = Dense(units=n_classes, activation='softmax')(dropout1)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    model.save('lstm_keras.h5')
    return model
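# A minimal usage sketch (all sizes illustrative; X_ids are padded token-id sequences).
model = RNN_model(sentence_length=100, vocabulary_size=20000,
                  embedding_dim=128, n_classes=4, rnn_type='gru')
model.fit(X_ids, y_onehot, epochs=3, batch_size=64)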
# (fragment of a three-stream Y/U/V network; h9_conv_* and h10_conv_Y are defined
# earlier in the original script)
# Each "maxout" unit: two parallel convolutions merged by an element-wise max
h10_conv_U_a = Conv2D(30, (3, 3), padding='same', kernel_initializer='glorot_uniform',
                      kernel_regularizer=l2(0.0005))(h9_conv_U)
h10_conv_U_b = Conv2D(30, (3, 3), padding='same', kernel_initializer='glorot_uniform',
                      kernel_regularizer=l2(0.0005))(h9_conv_U)
h10_conv_U = maximum([h10_conv_U_a, h10_conv_U_b])
h10_conv_U = BatchNormalization(axis=1)(h10_conv_U)
h10_conv_V_a = Conv2D(30, (3, 3), padding='same', kernel_initializer='glorot_uniform',
                      kernel_regularizer=l2(0.0005))(h9_conv_V)
h10_conv_V_b = Conv2D(30, (3, 3), padding='same', kernel_initializer='glorot_uniform',
                      kernel_regularizer=l2(0.0005))(h9_conv_V)
h10_conv_V = maximum([h10_conv_V_a, h10_conv_V_b])
h10_conv_V = BatchNormalization(axis=1)(h10_conv_V)
poolY = MaxPooling2D(pool_size=(2, 2))(h10_conv_Y)
poolU = MaxPooling2D(pool_size=(2, 2))(h10_conv_U)
poolV = MaxPooling2D(pool_size=(2, 2))(h10_conv_V)
poolY = Dropout(0.2)(poolY)
poolU = Dropout(0.2)(poolU)
poolV = Dropout(0.2)(poolV)
# Inline connections: 1x1 maxout convolutions within each colour-channel stream
Y_to_Y_a = Conv2D(60, (1, 1), padding='same', kernel_initializer='glorot_uniform',
                  kernel_regularizer=l2(0.0005))(poolY)
Y_to_Y_b = Conv2D(60, (1, 1), padding='same', kernel_initializer='glorot_uniform',
                  kernel_regularizer=l2(0.0005))(poolY)
Y_to_Y = maximum([Y_to_Y_a, Y_to_Y_b])
Y_to_Y = BatchNormalization(axis=1)(Y_to_Y)
U_to_U_a = Conv2D(30, (1, 1), padding='same', kernel_initializer='glorot_uniform',
                  kernel_regularizer=l2(0.0005))(poolU)
U_to_U_b = Conv2D(30, (1, 1), padding='same', kernel_initializer='glorot_uniform',
                  kernel_regularizer=l2(0.0005))(poolU)
U_to_U = maximum([U_to_U_a, U_to_U_b])
U_to_U = BatchNormalization(axis=1)(U_to_U)
V_to_V_a = Conv2D(30, (1, 1), padding='same', kernel_initializer='glorot_uniform',
                  kernel_regularizer=l2(0.0005))(poolV)
V_to_V_b = Conv2D(30, (1, 1), padding='same', kernel_initializer='glorot_uniform',
                  kernel_regularizer=l2(0.0005))(poolV)
V_to_V = maximum([V_to_V_a, V_to_V_b])
V_to_V = BatchNormalization(axis=1)(V_to_V)  # restored to match the Y and U streams; the snippet was cut here
#Qmodel.add(Dropout(0.2))
#Qmodel.add(Dense(256, activation='relu'))
#Qmodel.add(Dropout(0.2))
Qmodel.add(Dense(dataY.shape[1]))
# opt = optimizers.Adam(lr=learning_rate)
opt = optimizers.RMSprop()
Qmodel.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
#initialize the action predictor model
action_predictor_model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
action_predictor_model.add(Dense(2048, activation='relu', input_dim=apdataX.shape[1]))
action_predictor_model.add(Dropout(0.2))
#action_predictor_model.add(Dense(256, activation='relu'))
#action_predictor_model.add(Dropout(0.5))
#action_predictor_model.add(Dense(256, activation='tanh'))
#action_predictor_model.add(Dropout(0.5))
#action_predictor_model.add(Dense(256, activation='relu'))
#action_predictor_model.add(Dropout(0.5))
#action_predictor_model.add(Dense(512, activation='relu'))
#action_predictor_model.add(Dropout(0.2))
#action_predictor_model.add(Dense(64*8, activation='relu'))
action_predictor_model.add(Dense(apdataY.shape[1]))
opt2 = optimizers.Adam(lr=apLearning_rate)
#opt2 = optimizers.RMSprop()
action_predictor_model.compile(loss='mse', optimizer=opt2, metrics=['accuracy'])
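# A hedged sketch of how the two networks are typically trained in this kind of setup:
# the Q model learns state+action -> value, the action predictor learns state -> action
# (epoch and batch sizes are illustrative).
Qmodel.fit(dataX, dataY, epochs=4, batch_size=32, verbose=0)
action_predictor_model.fit(apdataX, apdataY, epochs=4, batch_size=32, verbose=0)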