# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Hyperparameters
# Training parameters
# NOTE(review): this file appears to be several benchmark scripts concatenated
# together; `param_dict`, `util`, `timer` and `mnist` are defined outside
# this fragment.  `param_dict` is assumed to be a dict-like search point.
ACTIVATION = param_dict['activation']
BATCH_SIZE = param_dict["batch_size"] #=32
BASE_LR = param_dict['base_lr']
DATA_AUG = param_dict['data_augmentation']
EPOCHS = param_dict['epochs']
KERNEL_SIZE = param_dict['kernel_size']
# LR80/LR120/LR160/LR180: presumably learning rates switched in at epochs
# 80/120/160/180 of a step schedule — TODO confirm against the lr scheduler.
LR80 = param_dict['lr80']
LR120 = param_dict['lr120']
LR160 = param_dict['lr160']
LR180 = param_dict['lr180']
NUM_FILTERS = param_dict['num_filters']
NUM_FILTERS_IN = param_dict["num_filters_in"] #16
OPTIMIZER = util.get_optimizer_instance(param_dict)
#param_dict["n"] is used only when testing different depths of the resnet
# Other
LOSS_FUNCTION = param_dict["loss_function"]
METRICS = param_dict["metrics"]
MODEL_PATH = param_dict["model_path"]
# Constants
num_classes = 10
subtract_pixel_mean = True
# depth = 9*N + 2 is the ResNet v2 depth formula; N=3 gives 'ResNet29v2'.
N = 3
depth = N * 9 + 2
model_type = 'ResNet%dv%d' % (depth, 2)
# Load the dataset and close the surrounding timing scope (timer started
# outside this fragment).
(x_train, y_train), (x_test, y_test) = mnist.load_data()
timer.end()
# --- Search-point hyperparameters for the MLP benchmark ---
BATCH_SIZE, EPOCHS, DROPOUT = (
    param_dict[key] for key in ('batch_size', 'epochs', 'dropout'))
# util resolves an activation name plus its alpha (e.g. for LeakyReLU)
# into a Keras activation object.
_act = util.get_activation_instance
ACTIVATION = _act(param_dict['activation'], param_dict['alpha'])
ACTIVATION1 = _act(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = _act(param_dict['activation2'], param_dict['alpha2'])
del _act
NHIDDEN = param_dict['nhidden']
NUNITS = param_dict['nunits']
OPTIMIZER = util.get_optimizer_instance(param_dict)
# --- Other ---
LOSS_FUNCTION = param_dict['loss_function']
METRICS = param_dict['metrics']
model_path = ''
# --- Constants ---
num_classes = 10
# Let val_acc stall for half the epoch budget before stopping the run.
patience = math.ceil(EPOCHS / 2)
callbacks = [
    EarlyStopping(monitor="val_acc", min_delta=0.0001,
                  patience=patience, verbose=verbose, mode="auto"),
    TerminateOnNaN(),
]
timer.start('preprocessing')
# Reshape the raw MNIST arrays (n, 28, 28) into the 4-D layout Keras
# expects, placing the single grayscale channel where the active backend
# wants it.  The former hard-coded `x_train.reshape(60000, 784)` step was
# dropped: it assumed a fixed split size of 60000 (breaking any other
# split) and was redundant — the reshapes below yield the same final
# layout directly, and x_test was never flattened to begin with.
img_rows, img_cols = 28, 28
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
# Fragment: training-loop hyperparameters from the search point, plus
# early-stopping callbacks and [0, 1] pixel normalisation.
BATCH_SIZE = param_dict['batch_size']
EPOCHS = param_dict['epochs']
OPTIMIZER = util.get_optimizer_instance(param_dict)
# Constants
# Tolerate a val_acc plateau for half the epoch budget before stopping.
patience = math.ceil(EPOCHS / 2)
callbacks = [
    EarlyStopping(monitor="val_acc", min_delta=0.0001,
                  patience=patience, verbose=verbose, mode="auto"),
    TerminateOnNaN(),
]
num_classes = 10
# Scale raw byte pixel values into [0, 1] float32.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# --- Search-point hyperparameters for the IMDB text-CNN benchmark ---
ACTIVATION1 = util.get_activation_instance(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = util.get_activation_instance(param_dict['activation2'], param_dict['alpha2'])
# Plain scalar lookups pulled from the search point in one pass.
(BATCH_SIZE, DROPOUT, EMBEDDING_DIMS, EPOCHS, FILTERS, HIDDEN_DIMS,
 KERNEL_SIZE, MAX_FEATURES, MAXLEN, PADDING, STRIDES) = (
    param_dict[key]
    for key in ("batch_size", "dropout", "embedding_dims", "epochs",
                "filters", "hidden_dims", "kernel_size", "max_features",
                "maxlen", "padding", "strides"))
OPTIMIZER = util.get_optimizer_instance(param_dict)
# Other
model_path = param_dict["model_path"]
# Constants
# Allow val_acc to stall for half the epoch budget before stopping.
patience = math.ceil(EPOCHS / 2)
callbacks = [
    EarlyStopping(monitor="val_acc", min_delta=0.0001,
                  patience=patience, verbose=verbose, mode="auto"),
    TerminateOnNaN(),
]
# Load data, keeping only the MAX_FEATURES most frequent words.
if verbose == 1:
    print("Loading data...")
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=MAX_FEATURES)
return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))
# Fragment: MNIST setup for a pair-based (Siamese-style) benchmark.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
timer.end()
num_classes = 10
# Hyperparameters from the search point.
BATCH_SIZE = param_dict['batch_size']
EPOCHS = param_dict['epochs']
DROPOUT = param_dict['dropout']
ACTIVATION1 = util.get_activation_instance(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = util.get_activation_instance(param_dict['activation2'], param_dict['alpha2'])
ACTIVATION3 = util.get_activation_instance(param_dict['activation3'], param_dict['alpha3'])
UNITS = param_dict['units']
OPTIMIZER = util.get_optimizer_instance(param_dict)
# Early stopping: wait half the epoch budget on a val_acc plateau.
patience = math.ceil(EPOCHS/2)
callbacks = [
EarlyStopping(monitor="val_acc", min_delta=0.0001, patience=patience, verbose=verbose, mode="auto"),
TerminateOnNaN()]
timer.start('preprocessing')
# Scale pixel bytes to [0, 1] floats.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
input_shape = x_train.shape[1:]
# Group training indices by digit label, then build training pairs.
# NOTE(review): create_pairs is defined elsewhere; presumably it returns
# (pair_array, pair_labels) for contrastive training — TODO confirm.
digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]
tr_pairs, tr_y = create_pairs(x_train, digit_indices)
# Hyperparameters
# Fragment: search-point hyperparameters for a two-conv-layer CNN benchmark
# (conv sizes F1/F2, pool size P, per-layer padding/stride choices).
ACTIVATION1 = util.get_activation_instance(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = util.get_activation_instance(param_dict['activation2'], param_dict['alpha2'])
ACTIVATION3 = util.get_activation_instance(param_dict['activation3'], param_dict['alpha3'])
ACTIVATION4 = util.get_activation_instance(param_dict['activation4'], param_dict['alpha4'])
ACTIVATION5 = util.get_activation_instance(param_dict['activation5'], param_dict['alpha5'])
BATCH_SIZE = param_dict["batch_size"]
DATA_AUGMENTATION = param_dict["data_augmentation"]
DROPOUT = param_dict["dropout"]
EPOCHS = param_dict["epochs"]
# f1/f2: kernel sizes, f1_units/f2_units: filter counts — presumably for
# the first/second conv layers; TODO confirm against the model builder.
F1_SIZE = param_dict["f1_size"]
F2_SIZE = param_dict["f2_size"]
F1_UNITS = param_dict["f1_units"]
F2_UNITS = param_dict["f2_units"]
NUNITS = param_dict["nunits"]
OPTIMIZER = util.get_optimizer_instance(param_dict)
P_SIZE = param_dict["p_size"]
PADDING_C1 = param_dict["padding_c1"]
PADDING_C2 = param_dict["padding_c2"]
PADDING_P1 = param_dict["padding_p1"]
PADDING_P2 = param_dict["padding_p2"]
STRIDE1 = param_dict["stride1"]
STRIDE2 = param_dict["stride2"]
# Other
model_path = param_dict["model_path"]
# Constants
# Early stopping: tolerate a val_acc plateau for half the epoch budget.
patience = math.ceil(EPOCHS/2)
callbacks = [
EarlyStopping(monitor="val_acc", min_delta=0.0001, patience=patience, verbose=verbose, mode="auto"),
TerminateOnNaN()]