# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# --- Fragment: data stage-in / preprocessing / model setup (indentation lost in this paste) ---
# NOTE(review): these lines read like the interior of a benchmark run() function;
# the enclosing `def` is not visible here, so the original nesting must be restored by hand.
# Load the train/test split; presumably keras.datasets.mnist given the imports below — TODO confirm.
(x_train, y_train), (x_test, y_test) = load_data()
if verbose == 1:
# The three prints below belong inside the `if verbose == 1:` body in the original file.
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
timer.end() # stage in
timer.start("preprocessing")
# Convert class vectors to binary class matrices.
# One-hot encodes integer labels into (num_samples, num_classes) matrices.
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
# Resume a previously checkpointed model when model_path is set; otherwise
# leave model=None so the fresh build below triggers.
if model_path:
savedModel = util.resume_from_disk(BNAME, param_dict, data_dir=model_path)
model_mda_path = savedModel.model_mda_path
model_path = savedModel.model_path
model = savedModel.model
initial_epoch = savedModel.initial_epoch
else:
model_mda_path = None
model = None
initial_epoch = 0
if not model:
# Fresh build: two Conv2D layers followed by max pooling; filter counts/sizes,
# strides, padding and activations all come from the parsed hyperparameters.
model = Sequential()
model.add(Conv2D(F1_UNITS, (F1_SIZE, F1_SIZE), strides = (STRIDE1, STRIDE1), padding=PADDING_C1,
input_shape=x_train.shape[1:], activation=ACTIVATION1))
# NOTE(review): this second conv reuses F1_UNITS/F1_SIZE even though F2_UNITS/F2_SIZE
# are parsed by build_parser below — confirm whether the second stage was intended here.
model.add(Conv2D(F1_UNITS, (F1_SIZE, F1_SIZE), strides = (STRIDE1, STRIDE1), padding=PADDING_C1, activation=ACTIVATION2))
model.add(MaxPooling2D(pool_size=(P_SIZE, P_SIZE), padding=PADDING_P1))
def build_parser():
"""Build this benchmark's argparse parser on top of the shared base parser.

Extends cliparser.build_base_parser() with the CNN-specific hyperparameters
(data-augmentation flag, convolution filter sizes and unit counts).
NOTE(review): this fragment is cut off mid-call — the --f2_units argument
below is missing its trailing arguments and the function's return in this paste.
"""
# Build this benchmark's parser on top of the common parser.
parser = build_base_parser()
# Benchmark specific hyperparameters.
# util.str2bool converts "true"/"false"-style CLI strings into Python booleans.
parser.add_argument("--data_augmentation", action="store", type=util.str2bool, dest="data_augmentation",
default=False, help="boolean. data_augmentation?")
# Square convolution filter dimensions, defaulting to 3x3.
parser.add_argument("--f1_size", action="store", dest="f1_size",
nargs="?", const=2, type=int, default=3,
help="Filter 1 dim")
parser.add_argument("--f2_size", action="store", dest="f2_size",
nargs="?", const=2, type=int, default=3,
help="Filter 2 dim")
# Number of filters per convolutional stage.
parser.add_argument("--f1_units", action="store", dest="f1_units",
nargs="?", const=2, type=int, default=32,
help="Filter 1 units")
parser.add_argument("--f2_units", action="store", dest="f2_units",
nargs="?", const=2, type=int, default=64,
# Set random seeds for reproducability: https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
# Per that FAQ the ordering here matters: PYTHONHASHSEED and the numpy/random
# seeds must be set *before* Keras is imported, and the single-threaded TF
# session must then be installed as the Keras backend session.
import numpy as np
import tensorflow as tf
import random as rn
import os
os.environ["PYTHONHASHSEED"] = "0"
np.random.seed(42)
rn.seed(12345)
# Single-threaded ops: op-level multi-threading is a source of non-determinism.
# NOTE(review): ConfigProto/Session/set_random_seed are TF1-era APIs; under TF2
# these would need tf.compat.v1 — confirm the pinned TensorFlow version.
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# --- Module loading, wrapped in a timer so import cost is reported ---
from deephyper.benchmarks_hps import util
timer = util.Timer()
timer.start("module loading")
import sys
import keras
from pprint import pprint
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
# NOTE(review): both mnist and imdb datasets are imported — this paste mixes
# several benchmark scripts (imdbcnn / mnistcnn / reutersmlp), so apparently
# unused imports are expected; do not prune without seeing the whole file.
from keras.datasets import imdb
from keras.preprocessing import sequence
from deephyper.benchmarks_hps.cliparser import build_base_parser
from keras.callbacks import EarlyStopping, TerminateOnNaN
import math
# --- Fragment: classifier head / tail of a ResNet builder (enclosing def not visible) ---
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
# Final softmax classification layer over num_classes outputs.
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
# NOTE(review): `inputs` and `x` are bound earlier in the (unseen) function body.
model = Model(inputs=inputs, outputs=outputs)
return model
# --- Fragment: resume-or-build logic for the ResNet benchmark (enclosing scope unseen) ---
# Restore a checkpointed model when MODEL_PATH is set; otherwise build fresh below.
if MODEL_PATH:
savedModel = util.resume_from_disk(BNAME, param_dict, data_dir=MODEL_PATH)
model_mda_path = savedModel.model_mda_path
# NOTE(review): rebinding the UPPER_CASE name MODEL_PATH is surprising for a
# constant-styled variable; after this it points at the checkpoint actually loaded.
MODEL_PATH = savedModel.model_path
model = savedModel.model
initial_epoch = savedModel.initial_epoch
else:
model = None
model_mda_path = None
initial_epoch = 0
if model is None:
# resnet_v2 is defined elsewhere in the original file; its classifier head is
# the fragment visible above.
model = resnet_v2(input_shape=input_shape, depth=depth)
model.compile(loss='categorical_crossentropy',
optimizer=OPTIMIZER,
metrics=['accuracy'])
#model.summary()
def run(param_dict=None, verbose=2):
"""Run a param_dict on the imdbcnn benchmark."""
# NOTE(review): cut off in this paste — the body continues past MAX_FEATURES
# in the original file.
# Read in values from CLI if no param dict was specified and clean up the param dict.
param_dict = util.handle_cli(param_dict, build_parser())
# Display the filled in param dict.
if verbose:
print("PARAM_DICT_CLEAN=")
pprint(param_dict)
timer.start("stage in")
# if param_dict['data_source']:
# data_source = param_dict['data_source']
# else:
# data_source = os.path.dirname(os.path.abspath(__file__))
# data_source = os.path.join(data_source, 'data')
ngram_range = 1
MAX_FEATURES = param_dict['max_features'] # = 20000
def run(param_dict=None, verbose=2):
"""Run a param_dict on the MNISTCNN benchmark."""
# NOTE(review): fragment is cut off part-way through hyperparameter extraction.
# Read in values from CLI if no param_dict was specified and clean up the param dict.
param_dict = util.handle_cli(param_dict, build_parser())
# Display the filled in param dict.
if verbose:
print("PARAM_DICT_CLEAN=")
pprint(param_dict)
# Get values from param_dict.
# Hyperparameters
# Each activation may carry an alpha parameter; its semantics live in
# util.get_activation_instance — not visible here.
ACTIVATION1 = util.get_activation_instance(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = util.get_activation_instance(param_dict['activation2'], param_dict['alpha2'])
ACTIVATION3 = util.get_activation_instance(param_dict['activation3'], param_dict['alpha3'])
ACTIVATION4 = util.get_activation_instance(param_dict['activation4'], param_dict['alpha4'])
ACTIVATION5 = util.get_activation_instance(param_dict['activation5'], param_dict['alpha5'])
BATCH_SIZE = param_dict["batch_size"]
DROPOUT = param_dict["dropout"]
EPOCHS = param_dict["epochs"]
# Convolution filter sizes / unit counts for the two conv stages.
F1_SIZE = param_dict["f1_size"]
F2_SIZE = param_dict["f2_size"]
F1_UNITS = param_dict["f1_units"]
F2_UNITS = param_dict["f2_units"]
MAX_POOL = param_dict["max_pool"]
NUNITS = param_dict["nunits"]
OPTIMIZER = util.get_optimizer_instance(param_dict)
# Padding modes for conv layers 1/2 and pooling layer 1.
PADDING_C1 = param_dict["padding_c1"]
PADDING_C2 = param_dict["padding_c2"]
PADDING_P1 = param_dict["padding_p1"]
def run(param_dict=None, verbose=2):
"""Run a param_dict on the reutersmlp benchmark."""
# NOTE(review): this fragment ends at `patience`; the model build / training
# logic follows in the original file.
# Read in values from CLI if no param dict was specified and clean up the param dict.
param_dict = util.handle_cli(param_dict, build_parser())
# Display the parsed param dict.
if verbose:
print("PARAM_DICT_CLEAN=")
pprint(param_dict)
# Get values from param_dict.
# Hyperparameters
ACTIVATION1 = util.get_activation_instance(param_dict["activation1"], param_dict["alpha1"])
ACTIVATION2 = util.get_activation_instance(param_dict["activation2"], param_dict["alpha2"])
ACTIVATION3 = util.get_activation_instance(param_dict["activation3"], param_dict["alpha3"])
ACTIVATION4 = util.get_activation_instance(param_dict["activation4"], param_dict["alpha4"])
ACTIVATION5 = util.get_activation_instance(param_dict["activation5"], param_dict["alpha5"])
BATCH_SIZE = param_dict["batch_size"]
DROPOUT = param_dict["dropout"]
EPOCHS = param_dict["epochs"]
MAX_WORDS = param_dict["max_words"]
NHIDDEN = param_dict['nhidden']
NUNITS = param_dict["nunits"]
OPTIMIZER = util.get_optimizer_instance(param_dict)
SKIP_TOP = param_dict["skip_top"]
# Other
model_path = param_dict["model_path"]
# Constants
# Half the epoch budget, rounded up; presumably the EarlyStopping patience
# (EarlyStopping is imported above) — confirm where it is consumed downstream.
patience = math.ceil(EPOCHS/2)