num_gt_boxes = 50
gt_boxes_shape_cntk = (num_gt_boxes, 5)
im_info = [1000, 1000, 1]
# Create input tensors with values (rpn_cls_score_shape_cntk is assumed to be
# defined earlier in the test as the shape of the RPN classification score map)
rpn_cls_score_dummy = np.random.random_sample(rpn_cls_score_shape_cntk).astype(np.float32)
x1y1 = np.random.random_sample((num_gt_boxes, 2)) * 500
wh = np.random.random_sample((num_gt_boxes, 2)) * 400
x2y2 = x1y1 + wh + 50
label = np.random.random_sample((num_gt_boxes, 1))
label = (label * 17.0)
gt_boxes = np.hstack((x1y1, x2y2, label)).astype(np.float32)
# Create CNTK layer and call forward
rpn_cls_score_var = input_variable(rpn_cls_score_shape_cntk)
gt_boxes_var = input_variable(gt_boxes_shape_cntk)
cntk_layer = user_function(CntkAnchorTargetLayer(rpn_cls_score_var, gt_boxes_var, cntk.constant(im_info, (3,)), deterministic=True))
state, cntk_output = cntk_layer.forward({rpn_cls_score_var: [rpn_cls_score_dummy], gt_boxes_var: [gt_boxes]})
obj_key = [k for k in cntk_output if 'objectness_target' in str(k)][0]
bbt_key = [k for k in cntk_output if 'rpn_bbox_target' in str(k)][0]
bbw_key = [k for k in cntk_output if 'rpn_bbox_inside_w' in str(k)][0]
cntk_objectness_target = cntk_output[obj_key][0]
cntk_bbox_targets = cntk_output[bbt_key][0]
cntk_bbox_inside_w = cntk_output[bbw_key][0]
# Create Caffe layer and call forward
bottom = [np.array(rpn_cls_score_dummy), np.array(gt_boxes), np.array(im_info)]
top = None # handled through return statement in caffe layer for unit testing
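# A minimal sketch of how the comparison could continue; the import path and
# class name below are assumptions (a Caffe-style reference AnchorTargetLayer
# exposing the standard python-layer setup/forward interface and returning its
# outputs for unit testing, as the comment above suggests).
from utils.caffe_layers.anchor_target_layer import AnchorTargetLayer  # hypothetical path
caffe_layer = AnchorTargetLayer()
caffe_layer.setup(bottom, top)
caffe_results = caffe_layer.forward(bottom, top)
objectness_target_caffe, bbox_targets_caffe, bbox_inside_w_caffe = caffe_results[:3]

# With deterministic=True on the CNTK layer, both implementations should agree.
assert np.allclose(cntk_objectness_target, objectness_target_caffe)
assert np.allclose(cntk_bbox_targets, bbox_targets_caffe)
assert np.allclose(cntk_bbox_inside_w, bbox_inside_w_caffe)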
def train(reader_train, epoch_size, max_epochs):
    set_computation_network_trace_level(0)
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))
    z = create_model(input_var, 3, num_classes)  # 3 for 20-layer, 8 for 50-layer
    lr_per_mb = [0.001] + [0.01]*80 + [0.001]*40 + [0.0001]

    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    minibatch_size = 16
    momentum_time_constant = -minibatch_size / np.log(0.9)
    l2_reg_weight = 0.0001

    lr_per_sample = [lr / minibatch_size for lr in lr_per_mb]
    lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size, unit=UnitType.sample)
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)
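    # Sketch of how these pieces are typically wired together in CNTK (not part
    # of the original snippet): a momentum SGD learner with L2 regularization
    # feeding a Trainer over the loss/metric pair defined above.
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, (ce, pe), learner)
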
try:
    model = model_definition_function(
        shape_of_inputs,
        self._num_actions,
        C.losses.cross_entropy_with_softmax,
        use_placeholder_for_input=True)
except ValueError:
    raise ValueError(
        'Unknown representation for policy: "{0}"'
        '\n'.format(self._parameters.policy_representation))

self._policy_network = model['f']
self._policy_network.replace_placeholder(self._input_variables)
self._policy_network_output_variables = model['outputs']
# The weight is computed as part of the Actor-Critic algorithm.
self._policy_network_weight_variables = \
    C.ops.input_variable(shape=(1,), dtype=np.float32)
self._policy_network_loss = \
    model['loss'] * self._policy_network_weight_variables

# Initialized from a saved model.
if self._parameters.initial_policy_network:
    self._policy_network.restore(
        self._parameters.initial_policy_network)

print("Parameterized the agent's policy using neural networks "
      '"{0}" with {1} actions.\n'
      ''.format(self._parameters.policy_representation,
                self._num_actions))

# Set up value network.
if self._parameters.shared_representation:
    # For shared representation, policy pi and value function V share
Args:
    shape_of_inputs: tuple of array (input) dimensions.
    number_of_outputs: dimension of output, equals the number of
        possible actions.
    loss_function: if not specified, use squared loss by default.
    use_placeholder_for_input: if true, inputs have to be replaced
        later with actual input_variable.

Returns: a Python dictionary with string-valued keys including
    'inputs', 'outputs', 'loss' and 'f'.
"""
# input/output
inputs = C.ops.placeholder(shape=shape_of_inputs) \
    if use_placeholder_for_input \
    else C.ops.input_variable(shape=shape_of_inputs, dtype=np.float32)
outputs = C.ops.input_variable(
    shape=(number_of_outputs,), dtype=np.float32)

# network structure
centered_inputs = inputs - 128
scaled_inputs = centered_inputs / 256

with C.layers.default_options(activation=C.ops.relu):
    q = C.layers.Sequential([
        C.layers.Convolution((8, 8), 32, strides=4),
        C.layers.Convolution((4, 4), 64, strides=2),
        C.layers.Convolution((3, 3), 64, strides=2),
        C.layers.Dense((512,)),
        C.layers.Dense(number_of_outputs, activation=None)
    ])(scaled_inputs)

if loss_function is None:
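    # Assumed completion, following the docstring above: fall back to squared
    # loss when no loss_function is given, otherwise use the one provided, then
    # return the dictionary with the documented keys ('inputs', 'outputs', 'loss', 'f').
    loss = C.losses.squared_error(q, outputs)
else:
    loss = loss_function(q, outputs)
return {'inputs': inputs, 'outputs': outputs, 'f': q, 'loss': loss}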
def create_model():
    # Source and target inputs to the model
    batch_axis = Axis.default_batch_axis()
    input_seq_axis = Axis('inputAxis')
    label_seq_axis = Axis('labelAxis')

    input_dynamic_axes = [batch_axis, input_seq_axis]
    raw_input = input_variable(
        shape=(input_vocab_dim), dynamic_axes=input_dynamic_axes, name='raw_input')

    label_dynamic_axes = [batch_axis, label_seq_axis]
    raw_labels = input_variable(
        shape=(label_vocab_dim), dynamic_axes=label_dynamic_axes, name='raw_labels')

    # Instantiate the sequence to sequence translation model
    input_sequence = raw_input

    # Drop the sentence start token from the label, for decoder training
    label_sequence = slice(raw_labels, label_seq_axis,
                           1, 0, name='label_sequence')  # <s> A B C </s> --> A B C
    label_sentence_start = sequence.first(raw_labels)     # <s>

    # Setup primer for decoder
    is_first_label = sequence.is_first(label_sequence)    # 1 0 0 0 ...
    label_sentence_start_scattered = sequence.scatter(
        label_sentence_start, is_first_label)

    # Encoder
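    # A hedged sketch of how the encoder could continue (not from the original
    # snippet): run the input sequence through an LSTM recurrence; `hidden_dim`
    # and the cntk.layers import are assumptions made for illustration.
    from cntk.layers import Recurrence, LSTM
    encoder_output = Recurrence(LSTM(hidden_dim))(input_sequence)
    encoder_final_state = sequence.last(encoder_output)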
def convnet_mnist():
    # Set global device type.
    cpu = C.DeviceDescriptor.cpu_device()
    try_set_default_device(cpu, acquire_device_lock=False)

    # Define data.
    image_height = 28
    image_width = 28
    num_channels = 1
    input_shape = (num_channels, image_height, image_width)
    input_dimensions = image_height * image_width * num_channels
    num_output_classes = 10

    # Input variables denoting the features and label data
    input_var = C.ops.input_variable(input_shape, np.float32)
    label_var = C.ops.input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    scaled_input = C.ops.element_times(C.ops.constant(0.00390625), input_var)

    # setup initializer
    init = uniform(scale=0.1, seed=32)

    with C.layers.default_options(activation=C.ops.relu, pad=False):
        conv1 = C.layers.Convolution2D((5, 5), 32, init=init, bias=False, pad=True)(scaled_input)
        pool1 = C.layers.MaxPooling((3, 3), (2, 2))(conv1)
        conv2 = C.layers.Convolution2D((3, 3), 48, init=init, bias=False)(pool1)
        pool2 = C.layers.MaxPooling((3, 3), (2, 2))(conv2)
        conv3 = C.layers.Convolution2D((3, 3), 64, init=init, bias=False)(pool2)
        dense4 = C.layers.Dense(96, init=init, bias=False)(conv3)
        drop4 = C.layers.Dropout(0.5, seed=32)(dense4)
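        # Assumed continuation (the snippet is truncated here): a final linear
        # layer producing one score per class; the variable name `z` is a sketch.
        z = C.layers.Dense(num_output_classes, activation=None, init=init)(drop4)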
def train_and_evaluate(reader_train, reader_test, max_epochs):
    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))

    # Normalize the input
    feature_scale = 1.0 / 256.0
    input_var_norm = element_times(feature_scale, input_var)

    # apply model to input
    z = create_resnet_model(input_var_norm, 10)

    #
    # Training action
    #

    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)
def simple_mnist(debug_output=False):
    input_dim = 784
    num_output_classes = 10
    num_hidden_layers = 1
    hidden_layers_dim = 200

    # Input variables denoting the features and label data
    input = input_variable(input_dim, np.float32)
    label = input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    scaled_input = element_times(constant(0.00390625), input)
    netout = fully_connected_classifier_net(
        scaled_input, num_output_classes, hidden_layers_dim, num_hidden_layers, relu)

    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
communicator = distributed.communicator()
workers = communicator.workers()
current_worker = communicator.current_worker()
print("List all distributed workers")
for wk in workers:
    if current_worker.global_rank == wk.global_rank:
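        # Assumed completion (the snippet is truncated here): print each worker's
        # rank, marking the current process; the message format is a sketch.
        print("* rank {} (current worker)".format(wk.global_rank))
    else:
        print("  rank {}".format(wk.global_rank))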