# logger.debug(f'[STEP] num worker = {config["num_worker"]}')
# logger.debug(f'[STEP] step = {config["step"]}')
logger.debug('[PARAM] Creating StateSpace')
config['state_space'] = StateSpace(config['state_space'], config['num_layers'])
logger.debug('[PARAM] StateSpace created')
logger.debug('[PARAM] Loading data')
load_data = import_module(param_dict['load_data_module_name']).load_data
# Loading data
(t_X, t_y), (v_X, v_y) = load_data(dest='DATA')
logger.debug('[PARAM] Data loaded')
config['input_shape'] = list(np.shape(t_X))[1:]
config[a.data] = { a.train_X: t_X,
a.train_Y: t_y,
a.valid_X: v_X,
a.valid_Y: v_y }
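# Sketch of the input_shape idiom above, using a hypothetical batch shape
# (128 MNIST-like images) purely for illustration: np.shape gives the full
# array shape, and dropping the leading batch axis leaves the per-sample shape.
_shape_demo = list(np.shape(np.zeros((128, 28, 28, 1))))[1:]  # -> [28, 28, 1]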
action = config['arch_seq']
logger.debug(f'[ACTION] = {action}')
architecture = config['state_space'].action2dict(config, action)
logger.debug(f'[ARCHIT] = {pformat(architecture)}')
# For all the Net generated by the CONTROLLER
trainer = BasicTrainer(config)
# Run the trainer and get the rewards
reward = trainer.get_rewards(architecture)
logger.debug(f'[REWARD/RESULT] = {reward}')
result = reward
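# result carries the reward back to the search loop; per the comments above,
# it is the score for this net generated by the CONTROLLER.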
# Resolve the activation chosen by the controller; anything not registered
# in act_dict falls back to ReLU.
activation = self.act_dict[layer_params[a.activation]] if layer_params[
    a.activation] in self.act_dict else tf.nn.relu
if 'skip_conn' in layer_params:
    # Skip connection: this layer reads from an earlier layer's output.
    net = self.get_layer_input(nets, layer_params['skip_conn'])
else:
    net = nets[-1]
if layer_type == a.conv2D:
    conv_params = self.conv2D_params.copy()
    conv_params.update(layer_params)
    num_filters = conv_params[a.num_filters]
    filter_width = conv_params[a.filter_width]
    filter_height = conv_params[a.filter_height]
    padding = conv_params[a.padding]
    stride_height = conv_params[a.stride_height]
    stride_width = conv_params[a.stride_width]
    pool_width = conv_params[a.pool_width]
    pool_height = conv_params[a.pool_height]
    if conv_params[a.batch_norm]:
        if conv_params[a.batch_norm_bef]:
            # Batch norm before the non-linearity: convolve with
            # activation=None, normalize, then activate.
            net = tf.layers.conv2d(
                net, filters=num_filters,
                kernel_size=[filter_height, filter_width],
                strides=[stride_height, stride_width], padding=padding,
                kernel_initializer=weights_initializer, activation=None,
                reuse=reuse, name=arch_key + '/{0}'.format(a.conv2D))
            net = tf.layers.batch_normalization(
                net, reuse=reuse,
                name=arch_key + '/{0}'.format(a.batch_norm))
            net = activation(net)
        else:
            # Batch norm after the activation.
            net = tf.layers.conv2d(
                net, filters=num_filters,
                kernel_size=[filter_height, filter_width],
                strides=[stride_height, stride_width], padding=padding,
                kernel_initializer=weights_initializer,
                activation=activation, reuse=reuse,
                name=arch_key + '/{0}'.format(a.conv2D))
            net = tf.layers.batch_normalization(
                net, reuse=reuse,
                name=arch_key + '/{0}'.format(a.batch_norm))
    else:
        net = tf.layers.conv2d(
            net, filters=num_filters,
            kernel_size=[filter_height, filter_width],
            strides=[stride_height, stride_width], padding=padding,
            kernel_initializer=weights_initializer,
            activation=activation, reuse=reuse,
            name=arch_key + '/{0}'.format(a.conv2D))
    if pool_height != 1 and pool_width != 1:
        net = tf.layers.max_pooling2d(net, [pool_height, pool_width],
                                      strides=[1, 1])
elif layer_type == a.conv1D:
    conv_params = layer_params
    num_filters = conv_params[a.num_filters]
    filter_size = conv_params[a.filter_size]
    padding = conv_params[a.padding]
    stride_size = conv_params[a.stride_size]
    pool_size = conv_params[a.pool_size]
    if conv_params[a.batch_norm]:
        if conv_params[a.batch_norm_bef]:
            net = tf.layers.conv1d(
                net, filters=num_filters, kernel_size=[filter_size],
                strides=[stride_size], padding=padding,
                kernel_initializer=weights_initializer, activation=None,
                reuse=reuse, name=arch_key + '/{0}'.format(a.conv1D))
            net = tf.layers.batch_normalization(
                net, reuse=reuse,
                name=arch_key + '/{0}'.format(a.batch_norm))
            net = activation(net)
        else:
            net = tf.layers.conv1d(
                net, filters=num_filters, kernel_size=[filter_size],
                strides=[stride_size], padding=padding,
                kernel_initializer=weights_initializer,
                activation=activation, reuse=reuse,
                name=arch_key + '/{0}'.format(a.conv1D))
            net = tf.layers.batch_normalization(
                net, reuse=reuse,
                name=arch_key + '/{0}'.format(a.batch_norm))
    else:
        net = tf.layers.conv1d(
            net, filters=num_filters, kernel_size=[filter_size],
            strides=[stride_size], padding=padding,
            kernel_initializer=weights_initializer,
            activation=activation, reuse=reuse,
            name=arch_key + '/{0}'.format(a.conv1D))
    if pool_size != 1:
        net = tf.layers.max_pooling1d(net, (pool_size,), strides=1)
elif layer_type == a.tempconv:
    # Temporal (dilated) 1D convolution.
    conv_params = layer_params
    num_filters = conv_params[a.num_filters]
    filter_size = conv_params[a.filter_size]
    padding = conv_params[a.padding]
    stride_size = conv_params[a.stride_size]
    pool_size = conv_params[a.pool_size]
    dilation = conv_params[a.dilation]  # (key assumed, by analogy with the other conv parameters)
    if conv_params[a.batch_norm]:
        if conv_params[a.batch_norm_bef]:
            net = tf.layers.conv1d(
                net, filters=num_filters, kernel_size=[filter_size],
                strides=[stride_size], padding=padding,
                dilation_rate=dilation,
                kernel_initializer=weights_initializer,
                data_format='channels_last', activation=None,
                reuse=reuse, name=arch_key + '/{0}'.format(a.tempconv))
            net = tf.layers.batch_normalization(
                net, reuse=reuse,
                name=arch_key + '/{0}'.format(a.batch_norm))
            net = activation(net)
        else:
            net = tf.layers.conv1d(
                net, filters=num_filters, kernel_size=[filter_size],
                strides=[stride_size], padding=padding,
                dilation_rate=dilation,
                kernel_initializer=weights_initializer,
                data_format='channels_last', activation=activation,
                reuse=reuse, name=arch_key + '/{0}'.format(a.tempconv))
            net = tf.layers.batch_normalization(
                net, reuse=reuse,
                name=arch_key + '/{0}'.format(a.batch_norm))
    else:
        # No batch norm: layer normalization is applied instead.
        net = tf.layers.conv1d(
            net, filters=num_filters, kernel_size=[filter_size],
            strides=[stride_size], padding=padding,
            dilation_rate=dilation,
            kernel_initializer=weights_initializer,
            data_format='channels_last', activation=activation,
            reuse=reuse, name=arch_key + '/{0}'.format(a.tempconv))
        net = tf.contrib.layers.layer_norm(net)
    if pool_size != 1:
        net = tf.layers.max_pooling1d(net, (pool_size,), strides=1)
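# Hedged standalone sketch of the batch_norm_bef ordering above (assumed
# 28x28x1 input; TF1-style tf.layers API, same as the code in this file):
def _bn_before_activation_example():
    x = tf.placeholder(tf.float32, [None, 28, 28, 1])
    h = tf.layers.conv2d(x, filters=32, kernel_size=[3, 3],
                         strides=[1, 1], padding='SAME',
                         activation=None)          # no non-linearity yet
    h = tf.layers.batch_normalization(h)           # normalize pre-activations
    return tf.nn.relu(h)                           # activation after the norm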
def __init__(self, config, arch_def):
    self.hyper_params = config[a.hyperparameters]
    self.learning_rate = config[a.hyperparameters][a.learning_rate]
    self.optimizer_name = config[a.hyperparameters][a.optimizer]
    self.batch_size = config[a.hyperparameters][a.batch_size]
    self.loss_metric_name = config[a.hyperparameters][a.loss_metric]
    self.max_grad_norm = config[a.hyperparameters][a.max_grad_norm]
    self.test_metric_name = config[a.hyperparameters][a.test_metric]
    # (config['input_shape'] elsewhere is [IMAGE_SIZE, IMAGE_SIZE,
    # NUM_CHANNELS] for images and [NUM_ATTRIBUTES] for vector data.)
    self.num_steps = config[a.num_steps]
    self.num_outputs = config[a.num_outputs]
    self.num_features = config[a.num_features]
    self.text_input = config[a.text_input]
    self.regression = config[a.regression]
    self.eval_batch_size = config[a.hyperparameters][a.eval_batch_size]
    self.unit_type = config[a.unit_type]
    self.arch_def = arch_def
    self.vocab_size = len(config[a.data][a.vocabulary])
    # Defaults for controller-chosen layer parameters; entries in
    # layer_params override these via dict.update().
    self.conv1D_params = {a.num_filters: 32,
                          a.filter_size: 3,
                          a.stride_size: 1,
                          a.pool_size: 1,
                          a.drop_out: 1,
                          a.padding: 'SAME',
                          a.activation: a.relu,
                          a.batch_norm: False,
                          a.batch_norm_bef: True}
    self.conv2D_params = {a.num_filters: 32,
                          a.filter_height: 3,
                          a.filter_width: 3,
                          a.stride_height: 1,
                          # (remaining defaults mirror conv1D_params above)
                          a.stride_width: 1,
                          a.pool_height: 1,
                          a.pool_width: 1,
                          a.drop_out: 1,
                          a.padding: 'SAME',
                          a.activation: a.relu,
                          a.batch_norm: False,
                          a.batch_norm_bef: True}
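# Hedged sketch of the config structure this constructor reads. The string
# keys below are an illustrative assumption: the real keys are the constants
# exported by the `a` module, and every value here is hypothetical.
_example_config = {
    'hyperparameters': {'learning_rate': 1e-3, 'optimizer': 'adam',
                        'batch_size': 64, 'eval_batch_size': 64,
                        'loss_metric': 'mean_squared_error',
                        'test_metric': 'accuracy', 'max_grad_norm': 5.0},
    'num_steps': 10,
    'num_outputs': 1,
    'num_features': 10,
    'text_input': False,
    'regression': True,
    'unit_type': 'LSTM',
    'data': {'vocabulary': {}},  # vocab_size is derived from this dict
}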
def preprocess_data(self):
    self.train_X = self.config[a.data][a.train_X]
    self.train_y = self.config[a.data][a.train_Y]
    # Shuffle X and y with the same permutation so samples stay aligned
    # with their labels.
    perm = np.random.permutation(np.shape(self.train_X)[0])
    self.train_X = self.train_X[perm]
    self.train_y = self.train_y[perm]
    self.valid_X = self.config[a.data][a.valid_X]
    self.valid_y = self.config[a.data][a.valid_Y]
    self.train_size = np.shape(self.config[a.data][a.train_X])[0]
    # if self.train_size == self.batch_size: self.train_size = self.train_X.shape[1]
    logger.debug(f'\ntrain_X.shape = {self.train_X.shape},\n'
                 f'train_y.shape = {self.train_y.shape},\n'
                 f'input_shape = {self.input_shape}')
    self.train_X = self.train_X.reshape(
        [-1] + self.input_shape).astype('float32')
    self.valid_X = self.valid_X.reshape(
        [-1] + self.input_shape).astype('float32')
    # Regression targets stay float; classification labels become int64.
    self.np_label_type = 'float32' if self.num_outputs == 1 else 'int64'
    self.train_y = np.squeeze(self.train_y).astype(self.np_label_type)
    self.valid_y = np.squeeze(self.valid_y).astype(self.np_label_type)
    logger.debug(f'\nafter reshaping: train_X.shape = {self.train_X.shape},\n'
                 f'train_y.shape = {self.train_y.shape},\n'
                 f'input_shape = {self.input_shape}')
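# Minimal self-contained sketch of the shuffle/reshape idiom above, with
# hypothetical shapes (6 samples, 2 features) chosen only for illustration:
def _preprocess_idiom_example():
    toy_X = np.arange(12).reshape(6, 2)
    toy_y = np.arange(6)
    perm = np.random.permutation(toy_X.shape[0])   # one permutation ...
    toy_X, toy_y = toy_X[perm], toy_y[perm]        # ... applied to both
    toy_X = toy_X.reshape([-1] + [2]).astype('float32')  # [-1] infers N
    return toy_X, np.squeeze(toy_y).astype('int64')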
logger.debug(f'[STEP] num worker = {config["num_worker"]}')
logger.debug(f'[STEP] step = {config["step"]}')
logger.debug('[PARAM] Creating StateSpace')
config['state_space'] = a.StateSpace(config['state_space'])
logger.debug('[PARAM] StateSpace created')
logger.debug('[PARAM] Loading data')
load_data = import_module(param_dict['load_data_module_name']).load_data
# Loading data
(t_X, t_y), (v_X, v_y) = load_data(dest='DATA')
logger.debug('[PARAM] Data loaded')
config['input_shape'] = list(np.shape(t_X))[1:]
config[a.data] = { a.train_X: t_X,
a.train_Y: t_y,
a.valid_X: v_X,
a.valid_Y: v_y }
action = config['arch_seq']
logger.debug(f'[ACTION] = {action}')
architecture = action2dict_v2(config, action, config['num_layers'])
logger.debug(f'[ARCHIT] = {pformat(architecture)}')
# For all the Net generated by the CONTROLLER
trainer = BasicTrainer(config)
arch_def = architecture
global_step = config['global_step']
# Run the trainer and get the rewards
logger.debug('[PARAM] StateSpace created')
logger.debug('[PARAM] Loading data')
load_data = import_module(param_dict['load_data_module_name']).load_data
# Loading data
config['num_steps'] = 10
data_cfg = {'num_steps': config['num_steps'],
            'batch_size': config['hyperparameters']['batch_size'],
            'dest': '/Users/Dipendra/Projects/deephyper/benchmarks/ptbNas/DATA'}
(t_X, t_y), (v_X, v_y), (test_X, test_y), vocab = load_data(data_cfg)
logger.debug('[PARAM] Data loaded')
config['input_shape'] = list(np.shape(t_X))[1:]
config[a.data] = { a.train_X: t_X,
a.train_Y: t_y,
a.valid_X: v_X,
a.valid_Y: v_y ,
a.test_X: test_X,
a.test_Y: test_y,
a.vocabulary: vocab}
config[a.vocab_size] = len(vocab)
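# vocab_size recorded here is the same quantity BasicTrainer.__init__
# re-derives as len(config[a.data][a.vocabulary]).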
action = config['arch_seq']
logger.debug(f'[ACTION] = {action[0]}')
architecture = action2dict_v2(config, action, config['num_layers'])
logger.debug(f'[ARCHIT] = {pformat(architecture)}')
# For all the Net generated by the CONTROLLER
trainer = BasicTrainer(config)
arch_def = architecture
global_step = config['global_step']