# AlexNet-style network in TFLearn; the imports below make the snippet self-contained.
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression

network = input_data(shape=[None, 227, 227, 3])
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 8, activation='softmax')
network = regression(network, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
# Training
model = tflearn.DNN(network, tensorboard_dir='/uhpc/roysam/aditi/alexnet',
                    checkpoint_path='/uhpc/roysam/aditi/alexnet/model_alexnet',
                    max_checkpoints=1, tensorboard_verbose=2)
model.fit(X_train, Y_train, n_epoch=1000, validation_set=0.1, shuffle=True,
          show_metric=True, batch_size=64, snapshot_step=200,
          snapshot_epoch=False, run_id='training')
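# For reference, a minimal sketch of the inputs the snippet above expects;
# shapes are read off the input_data and output layers, and the random arrays
# are hypothetical stand-ins for the original dataset.
import numpy as np

X_train = np.random.rand(64, 227, 227, 3).astype(np.float32)  # 227x227 RGB images
Y_train = np.eye(8)[np.random.randint(0, 8, size=64)]         # one-hot, 8 classes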
images_network = conv_2d(images_network, 64, 3, activation=NETWORK.activation)
#images_network = local_response_normalization(images_network)
if NETWORK.use_batchnorm_after_conv_layers:
    images_network = batch_normalization(images_network)
images_network = max_pool_2d(images_network, 3, strides=2)
images_network = conv_2d(images_network, 128, 3, activation=NETWORK.activation)
if NETWORK.use_batchnorm_after_conv_layers:
    images_network = batch_normalization(images_network)
images_network = max_pool_2d(images_network, 3, strides=2)
images_network = conv_2d(images_network, 256, 3, activation=NETWORK.activation)
if NETWORK.use_batchnorm_after_conv_layers:
    images_network = batch_normalization(images_network)
images_network = max_pool_2d(images_network, 3, strides=2)
images_network = dropout(images_network, keep_prob=keep_prob)
images_network = fully_connected(images_network, 4096, activation=NETWORK.activation)
images_network = dropout(images_network, keep_prob=keep_prob)
images_network = fully_connected(images_network, 1024, activation=NETWORK.activation)
if NETWORK.use_batchnorm_after_fully_connected_layers:
    images_network = batch_normalization(images_network)
if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks:
    if NETWORK.use_hog_sliding_window_and_landmarks:
        landmarks_network = input_data(shape=[None, 2728], name='input2')
    elif NETWORK.use_hog_and_landmarks:
        landmarks_network = input_data(shape=[None, 208], name='input2')
    else:
        landmarks_network = input_data(shape=[None, 68, 2], name='input2')
    landmarks_network = fully_connected(landmarks_network, 1024, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        landmarks_network = batch_normalization(landmarks_network)
    landmarks_network = fully_connected(landmarks_network, 128, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        landmarks_network = batch_normalization(landmarks_network)
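# In two-branch models like this, the image and landmark streams are usually
# merged before the classifier. A hedged sketch with TFLearn's merge layer;
# the 'concat' mode and NETWORK.output_size are assumptions, not source code.
from tflearn.layers.merge_ops import merge

network = merge([images_network, landmarks_network], 'concat', axis=1)
network = fully_connected(network, NETWORK.output_size, activation='softmax')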
if verbose:
    print(name, 'FC params = ', np.prod(layer.W.get_shape().as_list()) + np.prod(layer.b.get_shape().as_list()), end=' ')
if b_norm:
    name += '_bnorm'
    scope_i = expand_scope_by_name(scope, name)
    layer = batch_normalization(layer, name=name, reuse=reuse, scope=scope_i)
    if verbose:
        print('bnorm params = ', np.prod(layer.beta.get_shape().as_list()) + np.prod(layer.gamma.get_shape().as_list()))
if non_linearity is not None:
    layer = non_linearity(layer)
if dropout_prob is not None and dropout_prob[i] > 0:
    layer = dropout(layer, 1.0 - dropout_prob[i])
if verbose:
    print(layer)
    print('output size:', np.prod(layer.get_shape().as_list()[1:]), '\n')
# Last decoding layer never has a non-linearity.
name = 'decoder_fc_' + str(n_layers - 1)
scope_i = expand_scope_by_name(scope, name)
layer = fully_connected(layer, layer_sizes[n_layers - 1], activation='linear',
                        weights_init='xavier', name=name, regularizer=regularizer,
                        weight_decay=weight_decay, reuse=reuse, scope=scope_i)
if verbose:
    print(name, 'FC params = ', np.prod(layer.W.get_shape().as_list()) + np.prod(layer.b.get_shape().as_list()), end=' ')
if b_norm_finish:
    name += '_bnorm'
    scope_i = expand_scope_by_name(scope, name)
    layer = batch_normalization(layer, name=name, reuse=reuse, scope=scope_i)
def build_model(self):
    convnet = input_data(shape=[None, self.image_size, self.image_size, 3], name='input')
    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
    convnet = conv_2d(convnet, 128, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)
    convnet = fully_connected(convnet, len(self.classes), activation='softmax')
    convnet = regression(convnet, optimizer='adam', learning_rate=self.learning_rate,
                         loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(convnet, tensorboard_dir='log')
    return model
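# A minimal usage sketch for the builder above; `trainer` stands for an
# instance of the (unnamed) owning class, and X/Y are assumed training arrays.
model = trainer.build_model()
model.fit({'input': X}, {'targets': Y}, n_epoch=10,
          validation_set=0.1, show_metric=True, run_id='convnet')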
block4_conv1 = conv_2d(block3_pool, 512, 3, activation='relu', name='block4_conv1')
block4_conv2 = conv_2d(block4_conv1, 512, 3, activation='relu', name='block4_conv2')
block4_conv3 = conv_2d(block4_conv2, 512, 3, activation='relu', name='block4_conv3')
block4_conv4 = conv_2d(block4_conv3, 512, 3, activation='relu', name='block4_conv4')
block4_pool = max_pool_2d(block4_conv4, 2, strides=2, name='block4_pool')
block5_conv1 = conv_2d(block4_pool, 512, 3, activation='relu', name='block5_conv1')
block5_conv2 = conv_2d(block5_conv1, 512, 3, activation='relu', name='block5_conv2')
block5_conv3 = conv_2d(block5_conv2, 512, 3, activation='relu', name='block5_conv3')
block5_conv4 = conv_2d(block5_conv3, 512, 3, activation='relu', name='block5_conv4')
block5_pool = max_pool_2d(block5_conv4, 2, strides=2, name='block5_pool')
flatten_layer = tflearn.layers.core.flatten(block5_pool, name='Flatten')
fc1 = fully_connected(flatten_layer, 4096, activation='relu')
dp1 = dropout(fc1, 0.5)
fc2 = fully_connected(dp1, 4096, activation='relu')
dp2 = dropout(fc2, 0.5)
network = fully_connected(dp2, 1000, activation='softmax')
regression = tflearn.regression(network, optimizer='adam',
                                loss='categorical_crossentropy',
                                learning_rate=0.001)
model = tflearn.DNN(regression, checkpoint_path='vgg19',
                    tensorboard_dir="./logs")
net = tflearn.layers.conv.max_pool_2d(net,
                                      kernel_size=2,
                                      strides=2,
                                      padding='same',
                                      name='MaxPool2D')
net = tflearn.layers.core.flatten(net, name='Flatten')
net = fully_connected(net, 1024,
                      activation='tanh',
                      weights_init='truncated_normal',
                      bias_init='zeros',
                      regularizer=None,
                      weight_decay=0)
net = fully_connected(net, 1024,
                      activation='tanh',
                      weights_init='truncated_normal',
                      bias_init='zeros',
                      regularizer=None,
                      weight_decay=0)
net = tflearn.layers.core.dropout(net, keep_prob=0.5)
y_conv = fully_connected(net, 369,
                         activation='softmax',
                         weights_init='truncated_normal',
                         bias_init='zeros',
                         regularizer=None,
                         weight_decay=0)
total_parameters = 0
for variable in tf.trainable_variables():
    # shape is an array of tf.Dimension
    shape = variable.get_shape()
    print("  shape: %s" % str(shape))
    variable_parameters = 1
    for dim in shape:
        variable_parameters *= dim.value
    print("  variable_parameters: %i" % variable_parameters)
    total_parameters += variable_parameters
print("total_parameters: %i" % total_parameters)
    drug_gru_1_gate_bias.append(v)
elif "GRU_2/GRU_2/GRUCell/Candidate/Linear/Bias" in v.name:
    drug_gru_1_candidate_bias.append(v)
elif "GRU_3/GRU_3/GRUCell/Gates/Linear/Matrix" in v.name:
    drug_gru_2_gate_matrix.append(v)
elif "GRU_3/GRU_3/GRUCell/Candidate/Linear/Matrix" in v.name:
    drug_gru_2_candidate_matrix.append(v)
elif "GRU_3/GRU_3/GRUCell/Gates/Linear/Bias" in v.name:
    drug_gru_2_gate_bias.append(v)
elif "GRU_3/GRU_3/GRUCell/Candidate/Linear/Bias" in v.name:
    drug_gru_2_candidate_bias.append(v)
elif "Embedding_1" in v.name:
    drug_embd_W.append(v)
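# A hedged sketch of the loop this elif chain plausibly sits in: each trainable
# variable is routed into a bucket by substring-matching its scope name.
import tensorflow as tf

drug_gru_2_gate_bias = []  # one bucket shown; the others follow the same pattern
for v in tf.trainable_variables():
    if "GRU_3/GRU_3/GRUCell/Gates/Linear/Bias" in v.name:
        drug_gru_2_gate_bias.append(v)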
fc_1 = fully_connected(pool_2, 600, activation='leaky_relu', weights_init='xavier', name='fully1')
drop_2 = dropout(fc_1, 0.8)
fc_2 = fully_connected(drop_2, 300, activation='leaky_relu', weights_init='xavier', name='fully2')
drop_3 = dropout(fc_2, 0.8)
linear = fully_connected(drop_3, 1, activation='linear', name='fully3')
reg = regression(linear, optimizer='adam', learning_rate=0.0001,
                 loss='mean_square', name='target')
# Training
model = tflearn.DNN(reg, tensorboard_verbose=0, tensorboard_dir='./mytensor/',
                    checkpoint_path="./checkpoints/")
######### Setting weights
model.set_weights(prot_gru_1_gate_matrix[0], prot_gru_1_gates_kernel_init)
model.set_weights(prot_gru_1_gate_bias[0], prot_gru_1_gates_bias_init)
model.set_weights(prot_gru_1_candidate_matrix[0], prot_gru_1_candidate_kernel_init)
model.set_weights(prot_gru_1_candidate_bias[0], prot_gru_1_candidate_bias_init)
model.set_weights(prot_gru_2_gate_matrix[0], prot_gru_2_gates_kernel_init)
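# TFLearn also exposes the reverse operation, handy for sanity-checking the
# weight surgery above; a sketch assuming the bucket lists and init arrays
# shown earlier exist.
import numpy as np

w = model.get_weights(prot_gru_1_gate_matrix[0])
assert np.allclose(w, prot_gru_1_gates_kernel_init)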
Input is 48x48
3072 nodes in fully connected layer
"""
self.network = input_data(shape=[None, 48, 48, 1])
print("Input data ", self.network.shape[1:])
self.network = conv_2d(self.network, 64, 5, activation='relu')
print("Conv1 ", self.network.shape[1:])
self.network = max_pool_2d(self.network, 3, strides=2)
print("Maxpool1 ", self.network.shape[1:])
self.network = conv_2d(self.network, 64, 5, activation='relu')
print("Conv2 ", self.network.shape[1:])
self.network = max_pool_2d(self.network, 3, strides=2)
print("Maxpool2 ", self.network.shape[1:])
self.network = conv_2d(self.network, 128, 4, activation='relu')
print("Conv3 ", self.network.shape[1:])
self.network = dropout(self.network, 0.3)
print("Dropout ", self.network.shape[1:])
self.network = fully_connected(self.network, 3072, activation='relu')
print("Fully connected ", self.network.shape[1:])
self.network = fully_connected(self.network, len(self.target_classes), activation='softmax')
print("Output ", self.network.shape[1:])
print("\n")
# Generates a TrainOp, which holds the details of the optimization process:
# optimizer, loss function, learning rate, and so on.
self.network = regression(self.network, optimizer='momentum', metric='accuracy',
                          loss='categorical_crossentropy')
# Creates a model instance.
self.model = tflearn.DNN(self.network, checkpoint_path='model_1_atul',
                         max_checkpoints=1, tensorboard_verbose=2)
# Loads the model weights from the checkpoint.
self.load_model()
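# load_model itself is not shown in this fragment; a plausible minimal sketch
# using tflearn.DNN.load (the checkpoint file name is an assumed placeholder):
def load_model(self):
    self.model.load('model_1_atul.tflearn')  # hypothetical path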
# nb_filter for this first convolution is an assumption (its call head is
# truncated in the source); it mirrors the identical layer that follows.
net = tflearn.layers.conv.conv_2d(net,
                                  nb_filter=64,
                                  filter_size=3,
                                  activation='relu',
                                  strides=1,
                                  weight_decay=0.0)
net = tflearn.layers.conv.conv_2d(net,
                                  nb_filter=64,
                                  filter_size=3,
                                  activation='relu',
                                  strides=1,
                                  weight_decay=0.0)
net = tflearn.layers.conv.max_pool_2d(net,
                                      kernel_size=2,
                                      strides=2,
                                      padding='same',
                                      name='MaxPool2D')
net = tflearn.layers.core.dropout(net, keep_prob=0.25)
net = tflearn.layers.core.flatten(net, name='Flatten')
net = fully_connected(net, 1024,
                      activation='tanh',
                      weights_init='truncated_normal',
                      bias_init='zeros',
                      regularizer=None,
                      weight_decay=0)
net = tflearn.layers.core.dropout(net, keep_prob=0.5)
y_conv = fully_connected(net, 369,
                         activation='softmax',
                         weights_init='truncated_normal',
                         bias_init='zeros',
                         regularizer=None,
                         weight_decay=0)
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, output, activation='softmax')
network = regression(network, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
model = tflearn.DNN(network, max_checkpoints=0,
                    tensorboard_verbose=0, tensorboard_dir='log')
return model
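# A brief usage sketch; the enclosing function's name and its output/lr
# parameters are assumptions read off the fragment above.
model = build_alexnet(output=10, lr=0.001)  # hypothetical builder name
model.fit(X, Y, n_epoch=10, show_metric=True, run_id='alexnet')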