def BatchNormalization(self, node):
    # Inputs of ONNX BatchNormalization: X, scale, B, mean, var.
    val_x = self.graph.get_input_node(node, idx=0, copy=True)
    val_scale = self.graph.get_input_node(node, idx=1, copy=True)
    val_b = self.graph.get_input_node(node, idx=2, copy=True)
    val_mean = self.graph.get_input_node(node, idx=3, copy=True)
    val_var = self.graph.get_input_node(node, idx=4, copy=True)
    # The parameter nodes are consumed here, so they must not be emitted
    # as standalone layers.
    self.omit_nodes.append(val_scale.layer_name)
    self.omit_nodes.append(val_b.layer_name)
    self.omit_nodes.append(val_mean.layer_name)
    self.omit_nodes.append(val_var.layer_name)

    momentum = node.get_attr('momentum', 0.9)
    epsilon = node.get_attr('epsilon', 1e-5)
    # Attribute `spatial` is only defined in BatchNormalization-1, -6 and -7.
    spatial = bool(node.get_attr('spatial'))
    attr = {
        "momentum": momentum,
        "epsilon": epsilon,
        "data_layout": string('NCHW'),
        "is_test": True,
        "param_attr": string(val_scale.layer_name),
        "bias_attr": string(val_b.layer_name),
        "moving_mean_name": string(val_mean.layer_name),
        "moving_variance_name": string(val_var.layer_name),
        "use_global_stats": spatial,
        "name": string(node.layer_name)
    }
    node.fluid_code.add_layer("batch_norm",
                              inputs=val_x,
                              output=node,
                              param_attr=attr)
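
# For reference, a numpy sketch (illustrative only, not part of the mapper)
# of the inference-time computation the emitted batch_norm layer performs:
#
#     y = scale * (x - moving_mean) / sqrt(moving_var + epsilon) + bias
#
import numpy as np

def _batch_norm_infer_sketch(x, scale, bias, mean, var, epsilon=1e-5):
    # x is NCHW; the parameters are per-channel vectors broadcast over N, H, W.
    shape = (1, -1, 1, 1)
    return (scale.reshape(shape) * (x - mean.reshape(shape))
            / np.sqrt(var.reshape(shape) + epsilon) + bias.reshape(shape))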
# Excerpt from the max-pooling mapper: val_x, auto_pad, kernel_shape,
# strides, paddings, ceil_mode and fluid_op are read from the ONNX node
# earlier in the method.
if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
    input_shape = val_x.out_shapes[0]
    pad_h = get_same_padding(input_shape[2], kernel_shape[0], strides[0])
    pad_w = get_same_padding(input_shape[3], kernel_shape[1], strides[1])
    paddings = pad_h + pad_w  # SAME padding is applied via pool_padding below
attr = {
    "pool_size": kernel_shape,
    "pool_type": string("max"),
    "pool_stride": strides,
    "pool_padding": paddings,
    "ceil_mode": ceil_mode,
    "name": string(node.layer_name),
    "exclusive": False
}
node.fluid_code.add_layer(fluid_op,
                          inputs=val_x,
                          output=node,
                          param_attr=attr)
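
# A minimal sketch of the SAME-padding arithmetic that get_same_padding is
# assumed to implement (hypothetical helper, shown for clarity): the output
# size stays at ceil(input_size / stride), as ONNX SAME_UPPER/SAME_LOWER
# requires.
def _same_padding_sketch(in_size, kernel_size, stride):
    out_size = (in_size + stride - 1) // stride  # ceil(in_size / stride)
    pad_total = max((out_size - 1) * stride + kernel_size - in_size, 0)
    pad_begin = pad_total // 2
    return [pad_begin, pad_total - pad_begin]

# e.g. _same_padding_sketch(224, 3, 2) == [0, 1]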
def create_parameter(self, node, parameter=None):
    if parameter is not None:
        node = parameter
    dtype = node.dtype
    shape = node.out_shapes[0]
    # Register the weight blob so it is serialized with the model, then
    # emit a create_parameter layer that loads it by name.
    self.weights[node.layer_name] = node.weight
    attr = {
        'dtype': string(dtype),
        'shape': shape,
        'name': string(node.layer_name),
        'attr': string(node.layer_name),
        'default_initializer': 'Constant(0.0)'
    }
    node.fluid_code.add_layer("create_parameter",
                              inputs=None,
                              output=node,
                              param_attr=attr)
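
# The generated program is expected to contain a call roughly like the
# following (hypothetical name and shape, shown for illustration only):
#
#     conv0_w = fluid.layers.create_parameter(
#         dtype='float32', shape=[64, 3, 7, 7], name='conv0_w',
#         attr='conv0_w', default_initializer=Constant(0.0))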
def place_holder(self, node):
    self.input_shapes.append(node.out_shapes[0])
    attr = {
        "dtype": string(node.dtype),
        "shape": node.out_shapes[0],
        "name": string(node.layer_name),
        "append_batch_size": 'False'
    }
    node.fluid_code.add_layer("data",
                              inputs=None,
                              output=node,
                              param_attr=attr)
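
# Sketch of the data layer this emits for a graph input (illustrative
# values): append_batch_size is False because the ONNX shape already
# carries the batch dimension.
#
#     x = fluid.layers.data(name='x', shape=[1, 3, 224, 224],
#                           dtype='float32', append_batch_size=False)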
# Excerpt from directly_map, which translates ONNX ops that correspond
# one-to-one to a fluid layer: mapped_attrs, default_attrs, fluid_op,
# inputs, outputs, input_perm and output_perm come from the op's entry in
# the default mapping table earlier in the method.
if '_' in mapped_attrs:
    mapped_attrs.pop('_')
fluid_attrs = default_attrs.copy()
fluid_attrs.update(mapped_attrs)
# Reorder inputs/outputs when the fluid layer expects a different argument
# order than the ONNX op.
inputs = inputs if input_perm is None else list(
    map(lambda i: inputs[i], input_perm))
val_inps = []
for idx, ipt in enumerate(inputs):
    val_inps.append(self.graph.get_input_node(node, idx=idx, copy=True))
val_outs = outputs if output_perm is None else list(
    map(lambda i: outputs[i], output_perm))
attr = fluid_attrs
assert len(val_inps) == 1, 'directly_map error with multi inputs'
if fluid_op not in ['shape']:
    attr['name'] = string(node.layer_name)
node.fluid_code.add_layer(fluid_op,
                          inputs=val_inps[0],
                          output=val_outs[0],
                          param_attr=attr)
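
# Self-contained sketch of the permutation step above (illustrative helper,
# not part of the mapper):
def _permute_sketch(seq, perm):
    return list(seq) if perm is None else [seq[i] for i in perm]

assert _permute_sketch(['a', 'b', 'c'], [2, 0, 1]) == ['c', 'a', 'b']
assert _permute_sketch(['a', 'b'], None) == ['a', 'b']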
# Excerpt from the Conv mapper: val_x, val_w, input_shape, kernel_shape,
# strides, dilations, paddings, num_out_channels, num_groups, has_bias and
# fluid_op are derived from the ONNX node and its weight input earlier in
# the method.
if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
    pad_h = get_same_padding(input_shape[2], kernel_shape[0], strides[0])
    pad_w = get_same_padding(input_shape[3], kernel_shape[1], strides[1])
    paddings = pad_h + pad_w  # SAME padding is applied via the conv padding
attr = {
    "num_filters": num_out_channels,
    "filter_size": kernel_shape,
    "stride": strides,
    "padding": paddings,
    "dilation": dilations,
    "groups": num_groups,
    "param_attr": string(val_w.layer_name),
    "name": string(node.layer_name)
}
if has_bias:
    attr["bias_attr"] = string(val_b.layer_name)
else:
    attr["bias_attr"] = False
node.fluid_code.add_layer(fluid_op,
                          inputs=val_x,
                          output=node,
                          param_attr=attr)
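
# Sketch (assumption, for clarity) of the output-size arithmetic the padding
# above is meant to satisfy for each spatial dimension:
def _conv_out_size_sketch(in_size, kernel, stride, pad_begin, pad_end,
                          dilation=1):
    effective_kernel = dilation * (kernel - 1) + 1
    return (in_size + pad_begin + pad_end - effective_kernel) // stride + 1

# e.g. with SAME padding [0, 1]: _conv_out_size_sketch(224, 3, 2, 0, 1) == 112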
def GlobalAveragePool(self, node):
    val_x = self.graph.get_input_node(node, idx=0, copy=True)
    val_y = self.graph.get_node(node.layer.output[0], copy=True)
    input_shape = val_x.out_shapes[0]
    output_shape = val_y.out_shapes[0]
    assert input_shape is not None or output_shape is not None, \
        'poolnd not inferred'
    # Spatial rank = tensor rank minus the leading N and C dimensions.
    if input_shape:
        poolnd = len(input_shape) - 2
    elif output_shape:
        poolnd = len(output_shape) - 2
    assert 2 <= poolnd <= 3, 'only pool2d and pool3d is supported'
    fluid_op = 'pool{}d'.format(poolnd)
    attr = {
        "pool_type": string("avg"),
        "global_pooling": True,
        "name": string(node.layer_name)
    }
    node.fluid_code.add_layer(fluid_op,
                              inputs=val_x,
                              output=node,
                              param_attr=attr)
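
# Numpy sketch (illustrative only) of what the emitted global average pool
# computes for an NCHW tensor: a per-channel mean over all spatial positions.
import numpy as np
_x = np.arange(24, dtype=np.float32).reshape(1, 2, 3, 4)  # NCHW
_y = _x.mean(axis=(2, 3), keepdims=True)                  # shape (1, 2, 1, 1)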
def Pad(self, node, op_independent=True):
    val_x = self.graph.get_input_node(node, idx=0, copy=True)
    pads = node.get_attr('pads')
    mode = node.get_attr('mode', 'constant')
    value = node.get_attr('value', 0.)
    data_shape = val_x.out_shapes[0]
    output_shape = node.out_shapes[0]
    assume_pad2d = False
    attr = {}
    # Prefer pad2d when the mode or a 4-D (NCHW) shape indicates an image
    # pad; the plain pad op only supports constant mode.
    if len(pads) == 4:
        assume_pad2d |= mode != 'constant'
        if data_shape:
            assume_pad2d |= len(data_shape) == 4  # NCHW
        if output_shape:
            assume_pad2d |= len(output_shape) == 4  # NCHW
    if assume_pad2d:
        fluid_op = 'pad2d'
        attr['data_format'] = string('NCHW')
        attr['mode'] = string(mode)
    else:
        attr = {'pad_value': value}
        fluid_op = 'pad'
    # ONNX stores pads as all begins followed by all ends ("SS...EE...");
    # fluid expects begin/end interleaved per axis ("SESE...").
    if len(pads) == 4:
        paddings = np.array(pads).reshape(
            (-1, 2)).transpose().flatten().tolist()  # SSEE -> SESE
    elif len(pads) == 8:
        paddings = np.array(pads).reshape(
            (-1, 4)).transpose().flatten().tolist()  # SSEE -> SESE
        # If the leading N and C axes are unpadded, fall back to pad2d on
        # the spatial axes only.
        if sum(paddings[:4]) == 0:
            fluid_op = 'pad2d'
            paddings = paddings[4:]
            attr['mode'] = string(mode)
    attr['paddings'] = paddings
    if op_independent:
        attr['name'] = string(node.layer_name)
        node.fluid_code.add_layer(fluid_op,
                                  inputs=val_x,
                                  output=node,
                                  param_attr=attr)
    else:
        attr['name'] = string(node.layer_name + '_paded')
        node.fluid_code.add_layer(fluid_op,
                                  inputs=val_x,
                                  output=node.layer_name + '_paded',
                                  param_attr=attr)
        return node.layer_name + '_paded'
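
# Self-contained sketch of the pads reordering above (illustrative values):
import numpy as np
_pads = [0, 0, 1, 2, 0, 0, 3, 4]  # NCHW: begins for each axis, then ends
_sese = np.array(_pads).reshape((-1, 4)).transpose().flatten().tolist()
# _sese == [0, 0, 0, 0, 1, 3, 2, 4]; the leading four zeros mean N and C are
# unpadded, so the mapper switches to pad2d with paddings [1, 3, 2, 4].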