import numpy as np
import cntk as C
from cntk.layers import Sequential, Dense
from cntk.learners import sgd
from cntk.logging import ProgressPrinter

def ffnet():
    inputs = 2
    outputs = 2
    layers = 2  # (unused in this snippet)
    hidden_dimension = 50
    # Input variables denoting the features and label data
    features = C.input_variable(inputs, np.float32)
    label = C.input_variable(outputs, np.float32)
    # Instantiate the feedforward classification model
    my_model = Sequential([
        Dense(hidden_dimension, activation=C.sigmoid),
        Dense(outputs)])
    z = my_model(features)
    ce = C.cross_entropy_with_softmax(z, label)
    pe = C.classification_error(z, label)
    # Instantiate the trainer object to drive the model training
    lr_per_minibatch = C.learning_parameter_schedule(0.125)
    progress_printer = ProgressPrinter(0)
    trainer = C.Trainer(z, (ce, pe), [sgd(z.parameters, lr=lr_per_minibatch)], [progress_printer])
    # Get minibatches of training data and perform model training
    minibatch_size = 25
    num_minibatches_to_train = 1024
    aggregate_loss = 0.0
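    # The snippet ends before the training loop. A minimal sketch of how it would
    # typically continue; generate_random_data is an assumed helper (not defined
    # above) returning (features, labels) numpy arrays for one minibatch:
    for i in range(num_minibatches_to_train):
        batch_features, batch_labels = generate_random_data(minibatch_size, inputs, outputs)
        # Map the model's input variables to the actual minibatch data
        trainer.train_minibatch({features: batch_features, label: batch_labels})
        aggregate_loss += trainer.previous_minibatch_loss_average * \
            trainer.previous_minibatch_sample_count
    return aggregate_loss / trainer.total_number_of_samples_seen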
def _build_model(self):
    # DQN-style network: three convolutions followed by two dense layers, with
    # default_options sharing the initializer and activation across layers.
    with default_options(init=he_uniform(), activation=relu, bias=True):
        model = Sequential([
            Convolution((8, 8), 32, strides=(4, 4)),
            Convolution((4, 4), 64, strides=(2, 2)),
            Convolution((3, 3), 64, strides=(1, 1)),
            Dense(512, init=he_normal(0.01)),
            Dense(self._nb_actions, activation=None, init=he_normal(0.01))
        ])
    return model
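# A hedged usage sketch for _build_model: `agent` stands in for an instance of
# the surrounding class (which defines self._nb_actions), and the input shape
# of 4 stacked 84x84 frames is an illustrative assumption, not from the snippet.
import cntk as C
state_var = C.input_variable((4, 84, 84))   # assumed stacked-frame input
q_values = agent._build_model()(state_var)  # one output value per action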
def create_model(input_dim):
    # Two parallel input sequences (row and col), each embedded, stabilized and
    # regularized with dropout, then processed jointly by stacked LightLSTM
    # layers. opt and lightlstm are defined elsewhere in the surrounding script.
    row = sequence.input_variable(shape=input_dim)
    col = sequence.input_variable(shape=input_dim)
    rowh = Sequential([Embedding(opt.embed), Stabilizer(), Dropout(opt.dropout)])(row)
    colh = Sequential([Embedding(opt.embed), Stabilizer(), Dropout(opt.dropout)])(col)
    x = C.splice(rowh, colh, axis=-1)
    x = lightlstm(opt.embed, opt.nhid)(x)
    x = For(range(opt.layer - 1), lambda: lightlstm(opt.nhid, opt.nhid))(x)
    # Split the joint hidden state back into its row and col halves
    rowh = C.slice(x, -1, opt.nhid * 0, opt.nhid * 1)
    colh = C.slice(x, -1, opt.nhid * 1, opt.nhid * 2)
    row_predict = Sequential([Dropout(opt.dropout), Dense(input_dim)])(rowh)
    col_predict = Sequential([Dropout(opt.dropout), Dense(input_dim)])(colh)
    # Variables for the row and col labels
    row_label = sequence.input_variable(shape=input_dim)
    col_label = sequence.input_variable(shape=input_dim)
    model = C.combine([row_predict, col_predict])
    return {'row': row,
            'col': col,
            'row_label': row_label,
            'col_label': col_label,
            'model': model}
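# A hedged sketch of how the returned dictionary might be wired to a training
# criterion; pairing each prediction output with its label variable is an
# assumption based on the returned keys, not shown in the snippet itself.
net = create_model(input_dim)
row_loss = C.cross_entropy_with_softmax(net['model'].outputs[0], net['row_label'])
col_loss = C.cross_entropy_with_softmax(net['model'].outputs[1], net['col_label'])
criterion = row_loss + col_loss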
def create_multi_layer_neural_network(input_vars, out_dims, num_hidden_layers):
    num_hidden_neurons = 128
    hidden_layer = lambda: Dense(num_hidden_neurons, activation=cntk.ops.relu)
    output_layer = Dense(out_dims, activation=None)
    # LayerStack was the pre-2.0 name for this pattern; For(range(n), ...)
    # is the current CNTK spelling for stacking n identical layers
    model = Sequential([For(range(num_hidden_layers), hidden_layer),
                        output_layer])(input_vars)
    return model
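# A minimal usage sketch, assuming a 2-class problem with 784-dimensional
# inputs (both dimensions are illustrative, not taken from the snippet):
import cntk
from cntk.layers import Dense, Sequential, For
input_var = cntk.input_variable(784)
model = create_multi_layer_neural_network(input_var, out_dims=2, num_hidden_layers=3)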
def create_model(base_model_file, feature_node_name, last_hidden_node_name, num_classes, input_features, freeze=False):
    # Load the pretrained classification net and find nodes
    base_model = load_model(base_model_file)
    feature_node = find_by_name(base_model, feature_node_name)
    last_node = find_by_name(base_model, last_hidden_node_name)
    # Clone the desired layers, with weights frozen if requested
    cloned_layers = combine([last_node.owner]).clone(
        CloneMethod.freeze if freeze else CloneMethod.clone,
        {feature_node: placeholder(name='features')})
    # Add new dense layer for class prediction
    feat_norm = input_features - Constant(114)  # subtract the mean pixel value
    cloned_out = cloned_layers(feat_norm)
    # new_output_node_name is a module-level constant in the surrounding script
    z = Dense(num_classes, activation=None, name=new_output_node_name)(cloned_out)
    return z
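# A hedged usage sketch for the transfer-learning helper above; the model file,
# node names, class count, and image shape are illustrative assumptions that
# depend on which pretrained network is being adapted:
import cntk as C
image_input = C.input_variable((3, 224, 224))
z = create_model('ResNet_18.model', feature_node_name='features',
                 last_hidden_node_name='z.x', num_classes=10,
                 input_features=image_input, freeze=True)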
# Input variables denoting the features and label data
input_var = C.ops.input_variable((num_channels, image_height, image_width), np.float32)
label_var = C.ops.input_variable(num_output_classes, np.float32)
# Instantiate the feedforward classification model
scaled_input = C.ops.element_times(C.ops.constant(0.00390625), input_var)  # 1/256: scale pixels to [0, 1)
with C.layers.default_options(activation=C.ops.relu, pad=False):
    conv1 = C.layers.Convolution2D((5, 5), 32, pad=True)(scaled_input)
    pool1 = C.layers.MaxPooling((3, 3), (2, 2))(conv1)
    conv2 = C.layers.Convolution2D((3, 3), 48)(pool1)
    pool2 = C.layers.MaxPooling((3, 3), (2, 2))(conv2)
    conv3 = C.layers.Convolution2D((3, 3), 64)(pool2)
    f4 = C.layers.Dense(96)(conv3)
    drop4 = C.layers.Dropout(0.5)(f4)
    z = C.layers.Dense(num_output_classes, activation=None)(drop4)
ce = C.losses.cross_entropy_with_softmax(z, label_var)
pe = C.metrics.classification_error(z, label_var)
# Load train data
reader_train = create_reader(os.path.join(data_dir, 'Train-28x28_cntk_text.txt'), True,
                             input_dim, num_output_classes, max_epochs * epoch_size)
# Load test data
reader_test = create_reader(os.path.join(data_dir, 'Test-28x28_cntk_text.txt'), False,
                            input_dim, num_output_classes, C.io.FULL_DATA_SWEEP)
# Set learning parameters: per-sample learning-rate and momentum schedules
lr_per_sample = [0.001] * 10 + [0.0005] * 10 + [0.0001]
lr_schedule = C.learning_parameter_schedule_per_sample(lr_per_sample, epoch_size=epoch_size)
mms = [0] * 5 + [0.9990239141819757]
mm_schedule = C.learners.momentum_schedule_per_sample(mms, epoch_size=epoch_size)
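# The schedules above are defined but never consumed in this fragment. A minimal
# sketch of the learner and trainer they would typically feed (the progress
# writer settings are an assumption, not taken from the snippet):
learner = C.learners.momentum_sgd(z.parameters, lr_schedule, mm_schedule)
progress_printer = C.logging.ProgressPrinter(tag='Training', num_epochs=max_epochs)
trainer = C.Trainer(z, (ce, pe), learner, progress_printer)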
def _create_model(net_input, num_output_classes, num_hidden_layers, hidden_layers_dim):
    h = net_input
    with C.layers.default_options(init=C.glorot_uniform()):
        for i in range(num_hidden_layers):
            h = C.layers.Dense(hidden_layers_dim,
                               activation=C.relu)(h)
        return C.layers.Dense(num_output_classes, activation=None)(h)
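# A brief usage sketch (input_dim and the layer sizes are illustrative): build
# the classifier from an input variable and pair it with a loss and a metric.
features = C.input_variable(input_dim)
labels = C.input_variable(num_output_classes)
z = _create_model(features, num_output_classes, num_hidden_layers=2, hidden_layers_dim=50)
loss = C.cross_entropy_with_softmax(z, labels)
error = C.classification_error(z, labels)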
def _create_model(x_local, h_dims):
    """Create the model for time series prediction"""
    with C.layers.default_options(initial_state=0.1):
        m = C.layers.Recurrence(C.layers.LSTM(h_dims))(x_local)
        m = C.sequence.last(m)  # keep only the final step of the sequence
        m = C.layers.Dropout(0.2)(m)
        m = C.layers.Dense(1)(m)
        return m
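# A hedged sketch of wiring the time-series model to a squared-error loss; the
# one-dimensional input (a single feature per time step) is an assumption:
x = C.sequence.input_variable(1)
z = _create_model(x, h_dims=5)
l = C.input_variable(1, dynamic_axes=z.dynamic_axes, name='y')
loss = C.squared_error(z, l)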
# This fragment begins mid-statement inside a retraining helper; the opening of
# the first clone call is reconstructed from the symmetric call below. The
# earlier lookups of loaded_model, feature_node, last_conv_node, retraining_type,
# image_input, and num_classes are elided from the snippet.
conv_layers = cntk.ops.combine([last_conv_node.owner]).clone(
    cntk.ops.functions.CloneMethod.clone if retraining_type == 'all'
    else cntk.ops.functions.CloneMethod.freeze,
    {feature_node: cntk.ops.placeholder()})
# Load the fully connected layers, freezing if desired
last_node = cntk.logging.graph.find_by_name(loaded_model, 'h2_d')
fully_connected_layers = cntk.ops.combine([last_node.owner]).clone(
    cntk.ops.functions.CloneMethod.freeze if retraining_type == 'last_only'
    else cntk.ops.functions.CloneMethod.clone,
    {last_conv_node: cntk.ops.placeholder()})
# Define the network using the loaded layers
feat_norm = image_input - cntk.Constant(114)  # subtract the mean pixel value
conv_out = conv_layers(feat_norm)
fc_out = fully_connected_layers(conv_out)
new_model = cntk.layers.Dense(num_classes, name='last_layer')(fc_out)
return new_model