        attrs = {
            'name': bn_layer.get_attr('name'),
            'original_name': bn_layer.get_attr('name'),
            'class_name': 'BatchNormalizationQuantizedTanh',
            'n_in': bn_layer.get_attr('n_in'),
            'n_out': bn_layer.get_attr('n_in'),  # BatchNormalization preserves the tensor size, so n_out == n_in
            'n_filt': bn_layer.get_attr('n_filt'),
            'epsilon': bn_layer.get_attr('epsilon'),
            'quantize': quantize
        }
        bnbt_layer = model.make_node('BatchNormalizationQuantizedTanh', 'bnbt_' + bn_layer.name, attrs, bn_layer.inputs)
        # Replace the old BatchNormalization layer with the merged one
        model.replace_node(bn_layer, bnbt_layer)
        return True
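
# For context, a pass like this only takes effect once it is registered with
# the optimizer. A minimal sketch, assuming hls4ml's register_pass helper;
# the pass name 'merge_batch_norm_quantized_tanh' is an illustrative choice,
# not necessarily the name used upstream:
#
#     from hls4ml.model.optimizer import register_pass
#
#     register_pass('merge_batch_norm_quantized_tanh', MergeBatchNormAndQuantizedTanh)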
class QuantizeDenseOutput(OptimizerPass):
    def match(self, node):
        is_match = (node.__class__.__name__ == 'Dense' and node.get_attr('quantize', default=0) > 1
                    and node.get_input_node().__class__.__name__ == 'BatchNormalizationQuantizedTanh')
        return is_match
    def transform(self, model, node):
        # Compute the required precision and update the variables.
        # The accumulator sums n_in single-bit (uint<1>) terms, so the output
        # needs about log2(n_in) bits; the pass adds 2 bits of margin.
        nbits = int(np.ceil(np.log2(node.attributes['n_in'])) + 2)
        out_type = 'ap_int<{}>'.format(nbits)
        node.set_attr('accum_t', out_type)
        out_var = node.get_output_variable()
        out_var.type.precision = out_type
        quantized_data = None
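
# A small worked example of the accumulator sizing above; the n_in values
# are chosen arbitrarily for illustration.
import math

for n_in in (16, 128, 1024):
    nbits = int(math.ceil(math.log2(n_in)) + 2)
    print('n_in={:>4} -> ap_int<{}>'.format(n_in, nbits))
# n_in=  16 -> ap_int<6>
# n_in= 128 -> ap_int<9>
# n_in=1024 -> ap_int<12>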
import numpy as np

from ..optimizer import OptimizerPass
class EliminateLinearActivation(OptimizerPass):
    def match(self, node):
        return node.__class__.__name__ == 'Activation' and node.get_attr('activation') == 'linear'

    def transform(self, model, node):
        # A linear activation is an identity op, so the node can simply be removed
        model.remove_node(node)
        return True
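
# To make the match/transform contract concrete, a minimal sketch of the
# driver loop an optimizer runs. hls4ml's real entry point is optimize_model;
# apply_pass here is a hypothetical helper, and model.get_layers() mirrors
# hls4ml's node iterator.
def apply_pass(model, opt):
    changed = True
    while changed:
        changed = False
        for node in list(model.get_layers()):
            if opt.match(node):
                # A True return means the graph was mutated, so restart the scan
                changed = bool(opt.transform(model, node))
                break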
batchnorm_quantized_tanh_config_template = """struct config{index} : nnet::batchnorm_quantized_tanh_config {{
    static const unsigned n_in = {n_in};
    static const unsigned n_filt = {n_filt};
    static const unsigned io_type = nnet::{iotype};
    static const unsigned reuse_factor = {reuse};
}};\n"""
batchnorm_quantized_tanh_function_template = 'nnet::normalize_{quantize}_tanh<{input_t}, {config}>({input}, {output}, {threshold});'
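
# To see what the config template produces, render it with made-up values;
# every value below is illustrative only.
print(batchnorm_quantized_tanh_config_template.format(
    index=4, n_in=64, n_filt=-1, iotype='io_parallel', reuse=1))
# struct config4 : nnet::batchnorm_quantized_tanh_config {
#     static const unsigned n_in = 64;
#     static const unsigned n_filt = -1;
#     static const unsigned io_type = nnet::io_parallel;
#     static const unsigned reuse_factor = 1;
# };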
# Register the new layer type with the layer map
hls_model.register_layer('BatchNormalizationQuantizedTanh', BatchNormalizationQuantizedTanh)
# Register the function and config templates for the new layer
templates.register_templates('BatchNormalizationQuantizedTanh', batchnorm_quantized_tanh_function_template, batchnorm_quantized_tanh_config_template)
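
# Similarly, the function template expands to a single HLS call; the variable
# names passed in below are made up for illustration.
print(batchnorm_quantized_tanh_function_template.format(
    quantize='binary', input_t='input_t', config='config4',
    input='layer2_out', output='layer3_out', threshold='threshold3'))
# nnet::normalize_binary_tanh<input_t, config4>(layer2_out, layer3_out, threshold3);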
class MergeBatchNormAndQuantizedTanh(OptimizerPass):
    def match(self, node):
        is_match = (node.__class__.__name__ == 'Activation'
                    and node.get_attr('activation') in ['binary_tanh', 'ternary_tanh']
                    and node.get_input_node().__class__.__name__ == 'BatchNormalization')
        return is_match

    def transform(self, model, node):
        bn_layer = node.get_input_node()
        # Remove the Activation layer; the detached node object still carries its attributes
        model.remove_node(node, rewire=True)
        # Make a new layer with the merged attributes; quantize encodes the
        # activation width: 2 for binary_tanh, 3 for ternary_tanh
        quantize = 0
        if node.get_attr('activation') == 'binary_tanh':
            quantize = 2
        elif node.get_attr('activation') == 'ternary_tanh':
            quantize = 3