X = X.reshape((-1, 100, 100, 1))
X = X / 255.0  # scale pixels to [0, 1]; in-place /= fails on integer arrays
one_hot_encoder = OneHotEncoder()
y = one_hot_encoder.fit_transform(y.reshape(-1, 1))  # OneHotEncoder expects a 2D array
print("Data is ready!")
print("Training is starting!")
# Building convolutional network
network = input_data(shape=[None, 100, 100, 1], name='input')
network = conv_2d(network, 32, 5, activation='relu')
network = avg_pool_2d(network, 2)
network = conv_2d(network, 64, 5, activation='relu')
network = avg_pool_2d(network, 2)
network = fully_connected(network, 128, activation='relu')
network = fully_connected(network, 64, activation='relu')
network = fully_connected(network, 2, activation='softmax', restore=False)
network = regression(network, optimizer='adam', learning_rate=0.0001,
                     loss='categorical_crossentropy', name='target')
model = tflearn.DNN(network, tensorboard_verbose=0)
model.load('model/my_model.tflearn')
model.fit(X, y.toarray(), n_epoch=3, validation_set=0.1, shuffle=True,
          show_metric=True, batch_size=32, snapshot_step=100,
          snapshot_epoch=False, run_id='model_finetuning')
# Uncomment to save the finetuned model:
# model.save('model/my_model.tflearn')
print("Finetuning is DONE!")
print("Liveness Model is ready!")
# Building 'AlexNet'
network = input_data(shape=[None, 227, 227, 3])
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 8, activation='softmax')
network = regression(network, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
# Training
model = tflearn.DNN(network, tensorboard_dir='/uhpc/roysam/aditi/alexnet',
                    checkpoint_path='/uhpc/roysam/aditi/alexnet/model_alexnet',
                    max_checkpoints=1, tensorboard_verbose=2)
model.fit(X_train, Y_train, n_epoch=1000, validation_set=0.1, shuffle=True,
          show_metric=True, batch_size=64, snapshot_step=200,
          snapshot_epoch=False, run_id='training')
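# Usage sketch (assumption): a saved snapshot can later be restored into the
# same graph for evaluation; the exact checkpoint suffix is whatever step
# tflearn appended at save time, and X_test/Y_test are hypothetical.
# model.load('/uhpc/roysam/aditi/alexnet/model_alexnet-<step>')
# print(model.evaluate(X_test, Y_test))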
self.embedding_weights, self.config = ops.embedding_layer(
    metadata_path, embedding_weights)
self.embedded_text = tf.nn.embedding_lookup(self.embedding_weights,
                                            self.input)
with tf.name_scope("CNN_LSTM"):
    self.cnn_out = ops.multi_filter_conv_block(self.embedded_text,
                                               self.args["n_filters"],
                                               dropout_keep_prob=self.args["dropout"])
    self.lstm_out = ops.lstm_block(self.cnn_out,
                                   self.args["hidden_units"],
                                   dropout=self.args["dropout"],
                                   layers=self.args["rnn_layers"],
                                   dynamic=False,
                                   bidirectional=self.args["bidirectional"])
    self.out = tf.squeeze(fully_connected(self.lstm_out, 1, activation='sigmoid'))
with tf.name_scope("loss"):
    self.loss = losses.mean_squared_error(self.sentiment, self.out)
    if self.args["l2_reg_beta"] > 0.0:
        self.regularizer = ops.get_regularizer(self.args["l2_reg_beta"])
        self.loss = tf.reduce_mean(self.loss + self.regularizer)

# Evaluation measures.
with tf.name_scope("Pearson_correlation"):
    self.pco, self.pco_update = tf.contrib.metrics.streaming_pearson_correlation(
        self.out, self.sentiment, name="pearson")
with tf.name_scope("MSE"):
    self.mse, self.mse_update = tf.metrics.mean_squared_error(
        self.sentiment, self.out, name="mse")
def build_network(self):
    # Smaller 'AlexNet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    print('[+] Building CNN')
    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
    self.network = conv_2d(self.network, 64, 5, activation='relu')
    # self.network = local_response_normalization(self.network)
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = conv_2d(self.network, 64, 5, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = conv_2d(self.network, 128, 4, activation='relu')
    self.network = dropout(self.network, 0.3)
    self.network = fully_connected(self.network, 3072, activation='relu')
    self.network = fully_connected(
        self.network, len(EMOTIONS), activation='softmax')
    self.network = regression(
        self.network,
        optimizer='momentum',
        loss='categorical_crossentropy'
    )
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path=SAVE_DIRECTORY + '/emotion_recognition',
        max_checkpoints=1,
        tensorboard_verbose=2
    )
    self.load_model()
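# Usage sketch (assumption; `face` and `np` are hypothetical here): with the
# weights restored by load_model(), one preprocessed grayscale crop can be
# classified as follows.
# probs = self.model.predict(face.reshape((-1, SIZE_FACE, SIZE_FACE, 1)))
# emotion = EMOTIONS[np.argmax(probs[0])]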
x = tflearn.layers.conv.conv_2d(x, 128, (3, 3), strides=1, activation='relu', weight_decay=1e-5, regularizer='L2')
x3 = x  # skip connection at 24 x 32 resolution
x = tflearn.layers.conv.conv_2d(x, 256, (3, 3), strides=2, activation='relu', weight_decay=1e-5, regularizer='L2')
# feature map: 12 x 16
x = tflearn.layers.conv.conv_2d(x, 256, (3, 3), strides=1, activation='relu', weight_decay=1e-5, regularizer='L2')
x = tflearn.layers.conv.conv_2d(x, 256, (3, 3), strides=1, activation='relu', weight_decay=1e-5, regularizer='L2')
x4 = x  # skip connection at 12 x 16 resolution
x = tflearn.layers.conv.conv_2d(x, 512, (3, 3), strides=2, activation='relu', weight_decay=1e-5, regularizer='L2')
# feature map: 6 x 8
x = tflearn.layers.conv.conv_2d(x, 512, (3, 3), strides=1, activation='relu', weight_decay=1e-5, regularizer='L2')
x = tflearn.layers.conv.conv_2d(x, 512, (3, 3), strides=1, activation='relu', weight_decay=1e-5, regularizer='L2')
x = tflearn.layers.conv.conv_2d(x, 512, (3, 3), strides=1, activation='relu', weight_decay=1e-5, regularizer='L2')
x5 = x  # skip connection at 6 x 8 resolution
x = tflearn.layers.conv.conv_2d(x, 512, (5, 5), strides=2, activation='relu', weight_decay=1e-5, regularizer='L2')
# Fully connected branch: predicts a dense point set (256 points x 3 coordinates).
x_additional = tflearn.layers.core.fully_connected(x, 2048, activation='relu', weight_decay=1e-3, regularizer='L2')
x_additional = tflearn.layers.core.fully_connected(x_additional, 1024, activation='relu', weight_decay=1e-3, regularizer='L2')
x_additional = tflearn.layers.core.fully_connected(x_additional, 256 * 3, activation='linear', weight_decay=1e-3, regularizer='L2')
x_additional = tf.reshape(x_additional, (BATCH_SIZE, 256, 3))
# Decoder branch: transposed convolutions upsample back up, fusing each skip connection.
x = tflearn.layers.conv.conv_2d_transpose(x, 256, [5, 5], [6, 8], strides=2, activation='linear', weight_decay=1e-5, regularizer='L2')
x5 = tflearn.layers.conv.conv_2d(x5, 256, (3, 3), strides=1, activation='linear', weight_decay=1e-5, regularizer='L2')
x = tf.nn.relu(tf.add(x, x5))
x = tflearn.layers.conv.conv_2d(x, 256, (3, 3), strides=1, activation='relu', weight_decay=1e-5, regularizer='L2')
x = tflearn.layers.conv.conv_2d_transpose(x, 128, [5, 5], [12, 16], strides=2, activation='linear', weight_decay=1e-5, regularizer='L2')
x4 = tflearn.layers.conv.conv_2d(x4, 128, (3, 3), strides=1, activation='linear', weight_decay=1e-5, regularizer='L2')
x = tf.nn.relu(tf.add(x, x4))
x = tflearn.layers.conv.conv_2d(x, 128, (3, 3), strides=1, activation='relu', weight_decay=1e-5, regularizer='L2')
x = tflearn.layers.conv.conv_2d_transpose(x, 64, [5, 5], [24, 32], strides=2, activation='relu', weight_decay=1e-5, regularizer='L2')
x3 = tflearn.layers.conv.conv_2d(x3, 64, (3, 3), strides=1, activation='linear', weight_decay=1e-5, regularizer='L2')
x = tf.nn.relu(tf.add(x, x3))
x = tflearn.layers.conv.conv_2d(x, 64, (3, 3), strides=1, activation='relu', weight_decay=1e-5, regularizer='L2')
x = tflearn.layers.conv.conv_2d(x, 64, (3, 3), strides=1, activation='relu', weight_decay=1e-5, regularizer='L2')
x = tflearn.layers.conv.conv_2d(x, 3, (3, 3), strides=1, activation='linear', weight_decay=1e-5, regularizer='L2')
    # (This fragment begins inside a conditional over the network's input
    # modes; the opening if-lines were truncated in the original snippet.)
        landmarks_network = input_data(shape=[None, 2728], name='input2')
    elif NETWORK.use_hog_and_landmarks:
        landmarks_network = input_data(shape=[None, 208], name='input2')
    else:
        landmarks_network = input_data(shape=[None, 68, 2], name='input2')
    landmarks_network = fully_connected(landmarks_network, 1024, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        landmarks_network = batch_normalization(landmarks_network)
    landmarks_network = fully_connected(landmarks_network, 128, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        landmarks_network = batch_normalization(landmarks_network)
    images_network = fully_connected(images_network, 128, activation=NETWORK.activation)
    network = merge([images_network, landmarks_network], 'concat', axis=1)
else:
    network = images_network
network = fully_connected(network, NETWORK.output_size, activation='softmax')
if optimizer == 'momentum':
    optimizer = Momentum(learning_rate=learning_rate, momentum=optimizer_param,
                         lr_decay=learning_rate_decay, decay_step=decay_step)
elif optimizer == 'adam':
    optimizer = Adam(learning_rate=learning_rate, beta1=optimizer_param, beta2=learning_rate_decay)
else:
    print("Unknown optimizer: {}".format(optimizer))
network = regression(network, optimizer=optimizer, loss=NETWORK.loss, learning_rate=learning_rate, name='output')
return network
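# Usage sketch (assumption; `build_model` and its signature are hypothetical
# stand-ins for the enclosing function this fragment was taken from):
# network = build_model(optimizer='momentum', optimizer_param=0.9,
#                       learning_rate=0.001, learning_rate_decay=0.99,
#                       decay_step=100)
# model = tflearn.DNN(network)
# model.fit({'input1': images, 'input2': landmarks}, labels, n_epoch=20)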
# The snippet begins mid-call: the pooling layer's leading arguments were
# truncated, so a standard 2x2 max-pool is assumed here.
net = tflearn.layers.conv.max_pool_2d(net, 2, name='MaxPool2D')
net = tflearn.layers.conv.conv_2d(net,
                                  nb_filter=64,
                                  filter_size=3,
                                  activation='relu',
                                  strides=1,
                                  weight_decay=0.0)
net = tflearn.layers.core.flatten(net, name='Flatten')
net = fully_connected(net, 1024,
                      activation='tanh',
                      weights_init='truncated_normal',
                      bias_init='zeros',
                      regularizer=None,
                      weight_decay=0)
net = tflearn.layers.core.dropout(net, keep_prob=0.5)
y_conv = fully_connected(net, 369,
                         activation='softmax',
                         weights_init='truncated_normal',
                         bias_init='zeros',
                         regularizer=None,
                         weight_decay=0)
# for op in y_conv.get_operations():
# flops = ops.get_stats_for_node_def(g, op.node_def, 'flops').value
# print("FLOPS: %s" % str(flops))
total_parameters = 0
for variable in tf.trainable_variables():
    # shape is an array of tf.Dimension
    shape = variable.get_shape()
    print("  shape: %s" % str(shape))
    variable_parameters = 1
    # The rest of this loop was truncated in the original snippet; the
    # standard completion multiplies out the dimensions and accumulates.
    for dim in shape:
        variable_parameters *= dim.value
    total_parameters += variable_parameters
print("total parameters: %d" % total_parameters)
network = conv_3d(network, 96, 11, strides=4, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 256, 5, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 256, 3, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 3, activation='softmax')
network = regression(network, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                    max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
return model
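# Usage sketch (assumption; the enclosing function name and the data arrays
# are hypothetical):
# model = create_alexnet3d(lr=0.001)
# model.fit(X, Y, n_epoch=10, validation_set=0.1, show_metric=True,
#           run_id='alexnet3d')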
def make_core_network(network):
    network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    return network
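# Usage sketch (assumption, not part of the original): the core network can
# be wrapped in a standard tflearn training setup for MNIST-shaped input,
# matching the [-1, 28, 28, 1] reshape at the top of the function.
# net = tflearn.input_data(shape=[None, 784], name="input")
# net = make_core_network(net)
# net = regression(net, optimizer='adam', learning_rate=0.01,
#                  loss='categorical_crossentropy', name="target")
# model = tflearn.DNN(net)
# model.fit({"input": X}, {"target": Y}, n_epoch=1)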
def _build_network(self, layers):
    network = tf.transpose(self.input_tensor, [0, 2, 3, 1])
    # [batch, assets, window, features]
    network = network / network[:, :, -1, 0, None, None]
    for layer_number, layer in enumerate(layers):
        if layer["type"] == "DenseLayer":
            network = tflearn.layers.core.fully_connected(network,
                                                          int(layer["neuron_number"]),
                                                          layer["activation_function"],
                                                          regularizer=layer["regularizer"],
                                                          weight_decay=layer["weight_decay"])
        elif layer["type"] == "DropOut":
            network = tflearn.layers.core.dropout(network, layer["keep_probability"])
        elif layer["type"] == "EIIE_Dense":
            width = network.get_shape()[2]
            network = tflearn.layers.conv_2d(network, int(layer["filter_number"]),
                                             [1, width],
                                             [1, 1],
                                             "valid",
                                             layer["activation_function"],
                                             regularizer=layer["regularizer"],
                                             weight_decay=layer["weight_decay"])
        elif layer["type"] == "ConvLayer":