        # Fill the module-level config template with this layer's parameters;
        # n_in is taken from the size of the layer's input variable.
        params = self._default_config_params()
        params['n_in'] = self.get_input_variable().size_cpp()
        return self._config_template.format(**params)
batchnorm_quantized_tanh_config_template = """struct config{index} : nnet::batchnorm_quantized_tanh_config {{
    static const unsigned n_in = {n_in};
    static const unsigned n_filt = {n_filt};
    static const unsigned io_type = nnet::{iotype};
    static const unsigned reuse_factor = {reuse};
}};\n"""
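# For illustration only (hypothetical values): with index=3, n_in=64, n_filt=4,
# iotype=io_parallel and reuse=1, config_cpp() would render the template above to:
#
#   struct config3 : nnet::batchnorm_quantized_tanh_config {
#       static const unsigned n_in = 64;
#       static const unsigned n_filt = 4;
#       static const unsigned io_type = nnet::io_parallel;
#       static const unsigned reuse_factor = 1;
#   };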
batchnorm_quantized_tanh_function_template = 'nnet::normalize_{quantize}_tanh<{input_t}, {config}>({input}, {output}, {threshold});'
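# For illustration only: assuming {quantize} resolves to 'binary' or 'ternary'
# (matching the nnet::normalize_binary_tanh / nnet::normalize_ternary_tanh
# implementations), the function template above renders to a call such as
# (hypothetical variable names):
#
#   nnet::normalize_binary_tanh<input_t, config3>(layer2_out, layer3_out, threshold3);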
# Register the layer types to the layer map
hls_model.register_layer('BatchNormalizationQuantizedTanh', BatchNormalizationQuantizedTanh)
# Register the templates for config and function
templates.register_templates('BatchNormalizationQuantizedTanh', batchnorm_quantized_tanh_function_template, batchnorm_quantized_tanh_config_template)
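# Note: 'hls_model' and 'templates' are assumed to be the hls4ml modules imported
# at the top of this file. The OptimizerPass defined below must also be made known
# to hls4ml's optimizer (via its pass-registration mechanism) before it will run
# as part of the optimization flow; the exact call depends on the hls4ml version.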
class MergeBatchNormAndQuantizedTanh(OptimizerPass):
    def match(self, node):
        # Match a binary/ternary tanh Activation node that directly follows a BatchNormalization node
        is_match = (node.__class__.__name__ == 'Activation'
                    and node.get_attr('activation') in ['binary_tanh', 'ternary_tanh']
                    and node.get_input_node().__class__.__name__ == 'BatchNormalization')
        return is_match

    def transform(self, model, node):
        bn_layer = node.get_input_node()
        # Remove the Activation layer
        model.remove_node(node, rewire=True)
        # Make a new layer with the new attributes