tb = callbacks.TensorBoard(log_dir=args.save_dir + '/tensorboard-logs', batch_size=args.batch_size,
                           histogram_freq=int(args.debug), write_grads=False)
checkpoint1 = CustomModelCheckpoint(model, args.save_dir + '/best_weights_1' + appendix + '.h5',
                                    monitor='val_capsnet_acc', save_best_only=False,
                                    save_weights_only=True, verbose=1)
checkpoint2 = CustomModelCheckpoint(model, args.save_dir + '/best_weights_2' + appendix + '.h5',
                                    monitor='val_capsnet_acc', save_best_only=True,
                                    save_weights_only=True, verbose=1)
lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.lr * 0.5 ** (epoch // 10))
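# `margin_loss` (used in the compile calls below) is not defined in this
# snippet. A minimal sketch, following the margin loss of Sabour et al. (2017)
# with m+ = 0.9, m- = 0.1, and lambda = 0.5 (the alias `KB` is an assumption):
from keras import backend as KB

def margin_loss(y_true, y_pred):
    # Hinge-style loss on capsule lengths, summed over classes.
    L = y_true * KB.square(KB.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * KB.square(KB.maximum(0., y_pred - 0.1))
    return KB.mean(KB.sum(L, axis=1))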
if args.numGPU > 1:
    parallel_model = multi_gpu_model(model, gpus=args.numGPU)
else:
    parallel_model = model

if not hard_training:
    parallel_model.compile(optimizer=optimizers.Adam(lr=args.lr), loss=[margin_loss, 'mse'],
                           loss_weights=[1, 0.4], metrics={'capsnet': 'accuracy'})
else:
    parallel_model.compile(optimizer=optimizers.Adam(lr=args.lr), loss=[margin_loss_hard, 'mse'],
                           loss_weights=[1, 0.4], metrics={'capsnet': 'accuracy'})
# Begin: Training with data augmentation
def train_generator(x, y, batch_size, shift_fraction=args.shift_fraction):
    # Shifts of shift_fraction (~2 pixels for MNIST at the default of 0.1)
    train_datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False,
                                       featurewise_std_normalization=False,
                                       samplewise_std_normalization=False, zca_whitening=False,
                                       zca_epsilon=1e-06, rotation_range=0.1,
                                       width_shift_range=shift_fraction,
                                       height_shift_range=shift_fraction, shear_range=0.0,
                                       zoom_range=0.1, channel_shift_range=0.0, fill_mode='nearest',
                                       cval=0.0, horizontal_flip=True, vertical_flip=False,
                                       rescale=None, preprocessing_function=None, data_format=None)
    train_datagen.fit(x)
    generator = train_datagen.flow(x, y, batch_size=batch_size, shuffle=True)
    while True:
        x_batch, y_batch = next(generator)  # next(), not .next(), for Python 3 compatibility
        # Inputs: [image, label]; targets: [label, reconstruction target]
        yield [x_batch, y_batch], [y_batch, x_batch]
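# Hypothetical wiring of train_generator into training (x_train, y_train,
# x_test, y_test, and args.epochs are assumed names, not shown in this snippet):
parallel_model.fit_generator(
    generator=train_generator(x_train, y_train, args.batch_size, args.shift_fraction),
    steps_per_epoch=int(y_train.shape[0] / args.batch_size),
    epochs=args.epochs,
    validation_data=[[x_test, y_test], [y_test, x_test]],
    callbacks=[tb, checkpoint1, checkpoint2, lr_decay])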
labels_list = labels.tolist()
# Create the data-augmentation generator
aug = ImageDataGenerator(rotation_range=rotation_range, width_shift_range=width_shift_range,
                         height_shift_range=height_shift_range, shear_range=shear_range,
                         zoom_range=zoom_range, horizontal_flip=horizontal_flip, fill_mode=fill_mode)
# Set up or load the model
try:
    net = load_model("model.net")
except (IOError, OSError):  # no saved model yet; build and compile a fresh one
    net = Net.build(width=scale_size[0], height=scale_size[1], depth=3, classes=2)
    opt = Adam(lr=init_lr, decay=init_lr / epochs)
    net.compile(loss="binary_crossentropy", optimizer=opt)
# Train
net.fit_generator(aug.flow(x=np.asarray(ims_list), y=labels, batch_size=bs),
                  steps_per_epoch=len(ims) // bs, epochs=epochs)
# Alternative without augmentation:
# net.fit(x=np.asarray(ims_list), y=labels, epochs=epochs)
# Create the model directory if it doesn't exist
if not os.path.isdir(model_path):
    os.makedirs(model_path)

# Example single-image preprocessing:
# im = cv2.imread("mac.jpeg")
# im = cv2.resize(im, scale_size)
# im = img_to_array(im)
# im = np.expand_dims(im, axis=0)
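# Hypothetical single-image inference built on the preprocessing above:
# pred = net.predict(im)            # shape (1, 2): one probability per class
# label = int(np.argmax(pred[0]))   # index of the winning class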
X_in = Input(shape=(X.shape[1],))

# Define the model architecture
# NOTE: inputs to the graph convolutional layers are passed as a list of tensors.
H = Dropout(rate=0.5)(X_in)
H = GraphConvolution(16, support, activation='relu',
                     kernel_regularizer=l2(5e-4))([H] + G)
H = Dropout(rate=0.5)(H)
Y = GraphConvolution(y.shape[1], support, activation='softmax')([H] + G)

# Compile the model
model = Model(inputs=[X_in] + G, outputs=Y)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.01), weighted_metrics=['acc'])
# model.summary()
# Early-stopping callback
es_callback = EarlyStopping(monitor='val_weighted_acc', patience=PATIENCE)
# Train (full-batch: the whole graph is one batch, so shuffling is disabled)
validation_data = (graph, y_val, val_mask)
model.fit(graph, y_train, sample_weight=train_mask,
          batch_size=A.shape[0],
          epochs=NB_EPOCH,
          verbose=1,
          validation_data=validation_data,
          shuffle=False,
          callbacks=[es_callback])
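# A minimal sketch of how the `graph = [X, A_hat]` inputs used above are
# commonly assembled for support == 1, via the renormalization trick
# A_hat = D^-1/2 (A + I) D^-1/2 of Kipf & Welling (2017); `A` is assumed to be
# a scipy.sparse adjacency matrix, and the helper name is illustrative:
import numpy as np
import scipy.sparse as sp

def normalized_adjacency(A):
    A_tilde = A + sp.eye(A.shape[0])          # add self-loops
    d = np.asarray(A_tilde.sum(axis=1)).flatten()
    D_inv_sqrt = sp.diags(np.power(d, -0.5))  # D^-1/2
    return D_inv_sqrt @ A_tilde @ D_inv_sqrt  # symmetric normalization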
validation_size = gen_val.n_streams
gen_val = pescador.maps.keras_tuples(gen_val(),
                                     inputs=inputs,
                                     outputs=outputs)
loss = {'chord_tag': 'sparse_categorical_crossentropy'}
metrics = {'chord_tag': 'sparse_categorical_accuracy'}
loss.update(chord_pitch='binary_crossentropy',
            chord_root='sparse_categorical_crossentropy',
            chord_bass='sparse_categorical_crossentropy')
monitor = 'val_chord_tag_sparse_categorical_accuracy'
#sgd = K.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
adam = K.optimizers.Adam()
model.compile(adam, loss=loss, metrics=metrics)
# Save the model specification
model_spec = K.utils.serialize_keras_object(model)
with open(os.path.join(OUTPUT_PATH, 'model_spec.pkl'), 'wb') as fd:
    pickle.dump(model_spec, fd)

# Construct the weight path
weight_path = os.path.join(OUTPUT_PATH, 'model.h5')

# Build the callbacks
cb = []
cb.append(K.callbacks.ModelCheckpoint(weight_path,
                                      save_best_only=True,
                                      verbose=1,
                                      monitor=monitor))
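# Hypothetical training call wiring the pescador generator and callbacks
# together (gen_train, STEPS_PER_EPOCH, and MAX_EPOCHS are assumed names):
history = model.fit_generator(gen_train,
                              steps_per_epoch=STEPS_PER_EPOCH,
                              epochs=MAX_EPOCHS,
                              validation_data=gen_val,
                              validation_steps=validation_size,
                              callbacks=cb)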
if cutoff is None:
    choice = cutoff_choice(dataset_id, max_nb_variables)
else:
    assert cutoff in ['pre', 'post'], 'Cutoff parameter value must be either "pre" or "post"'
    choice = cutoff

if choice not in ['pre', 'post']:
    return
else:
    _, X_test = cutoff_sequence(None, X_test, choice, dataset_id, max_nb_variables)

if not is_timeseries:
    X_test = pad_sequences(X_test, maxlen=MAX_NB_VARIABLES[dataset_id], padding='post', truncating='post')
y_test = to_categorical(y_test, len(np.unique(y_test)))
optm = Adam(lr=1e-3)
model.compile(optimizer=optm, loss='categorical_crossentropy', metrics=['accuracy'])

if dataset_fold_id is None:
    weight_fn = "./weights/%s_weights.h5" % dataset_prefix
else:
    weight_fn = "./weights/%s_fold_%d_weights.h5" % (dataset_prefix, dataset_fold_id)
model.load_weights(weight_fn)

if test_data_subset is not None:
    X_test = X_test[:test_data_subset]
    y_test = y_test[:test_data_subset]

print("\nEvaluating : ")
loss, accuracy = model.evaluate(X_test, y_test, batch_size=batch_size)
print()
print("Final Accuracy : ", accuracy)
x = Conv2D(filters=48, kernel_size=(5, 5), padding='same', activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Conv2D(filters=64, kernel_size=(5, 5), padding='same', activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.3)(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.3)(x)
out = [Dense(10, name='digit%d' % i, activation='softmax')(x) for i in range(NUM_OF_LETTERS)]
model = Model(inputs=input_layer, outputs=out)
# Instantiate the Adam optimizer
opt = keras.optimizers.Adam(lr=0.0001, beta_1=0.99, beta_2=0.9999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='binary_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.summary()
digit_acc = [[] for _ in range(NUM_OF_LETTERS)]
val_digit_acc = [[] for _ in range(NUM_OF_LETTERS)]
loss = []
val_loss = []
def plot_diagram(digit_acc_now, val_digit_acc_now, loss_now, val_loss_now):
    global digit_acc, val_digit_acc, loss, val_loss
    for i in range(NUM_OF_LETTERS):
u = Conv2D(n_filters, (FL, FL), activation='relu', kernel_initializer=init,
           kernel_regularizer=l2(lmbda), padding='same')(u)
u = UpSampling2D((2, 2))(u)
u = concatenate([a1, u], axis=3)  # skip connection from the matching encoder block
u = Dropout(drop)(u)
u = Conv2D(n_filters, (FL, FL), activation='relu', kernel_initializer=init,
           kernel_regularizer=l2(lmbda), padding='same')(u)
u = Conv2D(n_filters, (FL, FL), activation='relu', kernel_initializer=init,
           kernel_regularizer=l2(lmbda), padding='same')(u)

# Final output
final_activation = 'sigmoid'
u = Conv2D(1, (1, 1), activation=final_activation, kernel_initializer=init,
           kernel_regularizer=l2(lmbda), name='output', padding='same')(u)
u = Reshape((dim, dim))(u)
model = Model(inputs=img_input, outputs=u)

# Optimizer / compile
optimizer = Adam(lr=learn_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
print(model.summary())
return model
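# Hypothetical training call for the segmentation model returned above
# (X_train are assumed (N, dim, dim, 1) images and Y_train the matching
# (N, dim, dim) binary masks; batch size and epochs are illustrative):
model.fit(X_train, Y_train, batch_size=32, epochs=10, validation_split=0.15)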
# no need to expand channels because they are equal
shortcut_y = keras.layers.normalization.BatchNormalization()(output_block_2)
output_block_3 = keras.layers.add([shortcut_y, conv_z])
output_block_3 = keras.layers.Activation('relu')(output_block_3)
# FINAL
gap_layer = keras.layers.GlobalAveragePooling1D()(output_block_3)
output_layer = keras.layers.Dense(nb_classes, activation='softmax')(gap_layer)
model = keras.models.Model(inputs=input_layer, outputs=output_layer)
model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=50, min_lr=0.0001)
# file_path = self.output_directory + 'best_model.hdf5'
# model_checkpoint = keras.callbacks.ModelCheckpoint(filepath=file_path, monitor='loss',
#                                                    save_best_only=True)
# self.callbacks = [reduce_lr, model_checkpoint]
self.callbacks = [reduce_lr]
return model
def get_optimizer(config):
    """Return an optimizer."""
    lr = config['optimizer']['initial_lr']
    optimizer = Adam(lr=lr)  # using Adam instead of SGD to speed up training
    return optimizer
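# Example usage with a minimal config dict matching the lookup above:
config = {'optimizer': {'initial_lr': 1e-3}}
optimizer = get_optimizer(config)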
z_in = Input(shape=(options.hidden,))
s_in = Input(shape=(None,))
seq = embedding(s_in)
z_exp_h = expandz_h(z_in)

if options.rnn_type == 'lstm':
    z_exp_c = expandz_c(z_in)
    state = [z_exp_h, z_exp_c]  # an LSTM carries both a hidden and a cell state
else:
    state = z_exp_h
h = decoder_rnn(seq, initial_state=state)
out = towords(h)
decoder = Model([s_in, z_in], out)

# Compile the autoencoder model for training
opt = keras.optimizers.Adam(lr=options.lr)
auto.compile(opt, sparse_loss)
auto.summary()
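# `sparse_loss` is referenced above but not defined in this snippet. A minimal
# sketch, assuming integer word-index targets and a softmax over the vocabulary:
def sparse_loss(y_true, y_pred):
    return K.sparse_categorical_crossentropy(y_true, y_pred)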
instances_seen = 0
for epoch in range(options.epochs + 1):
    klw = anneal(epoch, options.epochs)
    print('EPOCH {:03}: Set KL weight to {}'.format(epoch, klw))
    K.set_value(kl.weight, klw)

    for batch in tqdm(x):
        n, l = batch.shape
        batch_shifted = np.concatenate([np.ones((n, 1)), batch], axis=1)  # prepend the start symbol
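# A minimal sketch of a KL annealing schedule like `anneal` above (the actual
# schedule is not shown in this snippet; a linear warm-up is one common choice):
def anneal(epoch, total_epochs):
    # Ramp the KL weight linearly from 0 to 1 over the first half of training.
    return min(1.0, 2.0 * epoch / max(1, total_epochs))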