isolate code_gen in namespace change

calad0i committed Oct 28, 2024
1 parent ab45708 commit d2009dd
Showing 8 changed files with 403 additions and 36 deletions.
2 changes: 1 addition & 1 deletion example-models
4 changes: 2 additions & 2 deletions hls4ml/backends/fpga/fpga_backend.py
@@ -727,7 +727,7 @@ def generate_conv1d_line_buffer_fn(self, layer_idx, n_partitions, in_W, in_C, ke

generated_code = (
"template<class data_T, typename CONFIG_T>\n"
"class fill_buffer_{index} : public FillConv1DBuffer<data_T, CONFIG_T> {{\n"
"class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n"
" public:\n"
" static void fill_buffer(\n"
" data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n"
@@ -857,7 +857,7 @@ def generate_conv2d_line_buffer_fn(

generated_code = (
"template<class data_T, typename CONFIG_T>\n"
"class fill_buffer_{index} : public FillConv2DBuffer<data_T, CONFIG_T> {{\n"
"class fill_buffer_{index} : public nnet::FillConv2DBuffer<data_T, CONFIG_T> {{\n"
" public:\n"
" static void fill_buffer(\n"
" data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n"
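Why the explicit qualification matters in the two hunks above: the generated `fill_buffer_<index>` classes can now be emitted into a user-configured namespace, so their `FillConv1DBuffer`/`FillConv2DBuffer` base classes would no longer resolve without the `nnet::` prefix. A minimal rendering sketch, with a hypothetical layer index:

```python
# Illustrative only: rendering the updated Conv1D template with a
# hypothetical index. The explicit nnet:: qualification keeps the base
# class resolvable even when the generated class itself ends up inside
# a user-defined namespace instead of nnet.
template = (
    "template<class data_T, typename CONFIG_T>\n"
    "class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n"
    "  public:\n"
    "    static void fill_buffer(/* ... */);\n"
    "}};\n"
)
print(template.format(index=4))
```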
1 change: 1 addition & 0 deletions hls4ml/backends/template.py
@@ -62,6 +62,7 @@ def _default_config_params(self, layer):
params = self._default_params(layer)
params['iotype'] = layer.model.config.get_config_value('IOType')
params['reuse'] = layer.get_attr('reuse_factor')
+ params['namespace'] = layer.model.config.get_writer_config().get('Namespace', 'nnet')

return params

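A hedged sketch of the lookup this one-line addition performs; the dict below is a stand-in for whatever `layer.model.config.get_writer_config()` actually returns:

```python
# Stand-in for the writer configuration object; 'Namespace' may be
# absent, in which case templates keep emitting into the default nnet
# namespace via the 'nnet' fallback.
writer_config = {'Namespace': 'myproject'}  # hypothetical user setting
namespace = writer_config.get('Namespace', 'nnet')
print(namespace)  # 'myproject'; would print 'nnet' if the key were missing
```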
46 changes: 27 additions & 19 deletions hls4ml/backends/vivado/passes/convolution_templates.py
@@ -23,7 +23,7 @@
typedef {bias_t.name} bias_t;
typedef {weight_t.name} weight_t;
template<class data_T, class res_T, class CONFIG_T>
- using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
+ using kernel = {dense_function}<data_T, res_T, CONFIG_T>;
template<class x_T, class y_T>
using product = nnet::product::{product_type}<x_T, y_T>;
}};\n"""
@@ -53,7 +53,7 @@
static const unsigned n_partitions = {n_partitions};
static const unsigned n_pixels = out_width / n_partitions;
template<class data_T, class CONFIG_T>
- using fill_buffer = nnet::{fill_fn}<data_T, CONFIG_T>;
+ using fill_buffer = {fill_fn}<data_T, CONFIG_T>;
typedef {accum_t.name} accum_t;
typedef {bias_t.name} bias_t;
typedef {weight_t.name} weight_t;
@@ -89,9 +89,10 @@ def format(self, node):
params['scale_index_type'] = 'scale_index_regular'

if node.model.config.get_config_value('IOType') == 'io_parallel':
- params['fill_fn'] = f'fill_buffer_{node.index}'
+ namespace = params['namespace']
+ params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}'
else:
- params['fill_fn'] = 'FillConv1DBuffer'
+ params['fill_fn'] = 'nnet::FillConv1DBuffer'

conv_config = self.template.format(**params)

@@ -103,16 +104,18 @@ def format(self, node):
node.get_input_variable().type.precision, node.get_weights('weight').type.precision
)

+ namespace = params['namespace']
+
if node.get_attr('strategy').lower() == 'latency':
- mult_params['dense_function'] = 'DenseLatency'
+ mult_params['dense_function'] = 'nnet::DenseLatency'
elif node.get_attr('strategy').lower() == 'resource':
if int(mult_params['reuse_factor']) <= int(mult_params['n_in']):
- mult_params['dense_function'] = 'DenseResource_rf_leq_nin'
+ mult_params['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
else:
- mult_params['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
+ mult_params['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
# The 3rd case is never used
elif node.get_attr('strategy').lower() == 'resource_unrolled':
- mult_params['dense_function'] = f'dense_resource_unrolled_{node.index}'
+ mult_params['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}'

mult_config = self.mult_template.format(**mult_params)

@@ -170,7 +173,7 @@ def __init__(self):
static const unsigned n_partitions = {n_partitions};
static const unsigned n_pixels = out_height * out_width / n_partitions;
template<class data_T, class CONFIG_T>
- using fill_buffer = nnet::{fill_fn}<data_T, CONFIG_T>;
+ using fill_buffer = {fill_fn}<data_T, CONFIG_T>;
typedef {accum_t.name} accum_t;
typedef {bias_t.name} bias_t;
typedef {weight_t.name} weight_t;
@@ -214,9 +217,10 @@ def format(self, node):
params['scale_index_width_type'] = 'scale_index_regular'

if node.model.config.get_config_value('IOType') == 'io_parallel':
- params['fill_fn'] = f'fill_buffer_{node.index}'
+ namespace = params['namespace']
+ params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}'
else:
- params['fill_fn'] = 'FillConv2DBuffer'
+ params['fill_fn'] = 'nnet::FillConv2DBuffer'

conv_config = self.template.format(**params)

@@ -313,9 +317,10 @@ def format(self, node):
params['weight_t'] = node.get_weights('depthwise').type
params['bias_t'] = node.get_weights('zero_bias').type
if node.model.config.get_config_value('IOType') == 'io_parallel':
- params['fill_fn'] = f'fill_buffer_{node.index}_dw'
+ namespace = params['namespace']
+ params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_dw'
else:
- params['fill_fn'] = 'FillConv1DBuffer'
+ params['fill_fn'] = 'nnet::FillConv1DBuffer'

if node.get_attr('unscaled'):
params['scale_index_type'] = 'scale_index_unscaled'
@@ -359,9 +364,10 @@ def format(self, node):
params['min_width'] = params['in_width']
params['instructions'] = '0'
if node.model.config.get_config_value('IOType') == 'io_parallel':
- params['fill_fn'] = f'fill_buffer_{node.index}_pw'
+ namespace = params['namespace']
+ params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_pw'
else:
- params['fill_fn'] = 'FillConv1DBuffer'
+ params['fill_fn'] = 'nnet::FillConv1DBuffer'

if node.get_attr('unscaled'):
params['scale_index_type'] = 'scale_index_unscaled'
@@ -446,9 +452,10 @@ def format(self, node):
params['index'] = str(node.index) + '_depthwise'
params['weight_t'] = node.get_weights('depthwise').type
if node.model.config.get_config_value('IOType') == 'io_parallel':
- params['fill_fn'] = f'fill_buffer_{node.index}_dw'
+ namespace = params['namespace']
+ params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_dw'
else:
- params['fill_fn'] = 'FillConv2DBuffer'
+ params['fill_fn'] = 'nnet::FillConv2DBuffer'

if node.get_attr('unscaled_h'):
params['scale_index_height_type'] = 'scale_index_unscaled'
@@ -500,9 +507,10 @@ def format(self, node):
params['min_width'] = params['in_width']
params['instructions'] = '0'
if node.model.config.get_config_value('IOType') == 'io_parallel':
- params['fill_fn'] = f'fill_buffer_{node.index}_pw'
+ namespace = params['namespace']
+ params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_pw'
else:
- params['fill_fn'] = 'FillConv2DBuffer'
+ params['fill_fn'] = 'nnet::FillConv2DBuffer'

if node.get_attr('unscaled_h'):
params['scale_index_height_type'] = 'scale_index_unscaled'
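All six `format` methods in this file apply the same rule, sketched below; the helper name and arguments are assumptions mirroring the template parameters, not code from the commit. Per-layer generated fill functions live in the configurable namespace, while the generic streaming implementations stay in `nnet::`.

```python
# Sketch of the selection rule repeated throughout this file. Only
# io_parallel uses per-layer generated code, so only that branch gets
# the configurable namespace prefix.
def select_fill_fn(iotype, namespace, index, dims='1D', suffix=''):
    if iotype == 'io_parallel':
        # Generated per layer; lives in the user-configurable namespace.
        return f'{namespace}::fill_buffer_{index}{suffix}'
    # Generic library implementation; always in nnet.
    return f'nnet::FillConv{dims}Buffer'

print(select_fill_fn('io_parallel', 'myproject', 7, suffix='_dw'))
print(select_fill_fn('io_stream', 'myproject', 7, dims='2D'))
```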
12 changes: 7 additions & 5 deletions hls4ml/backends/vivado/passes/core_templates.py
@@ -20,7 +20,7 @@
typedef {weight_t.name} weight_t;
typedef {index_t.name} index_t;
template<class data_T, class res_T, class CONFIG_T>
- using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
+ using kernel = {dense_function}<data_T, res_T, CONFIG_T>;
template<class x_T, class y_T>
using product = nnet::product::{product_type}<x_T, y_T>;
}};\n"""
@@ -43,16 +43,18 @@ def format(self, node):
node.get_input_variable().type.precision, node.get_weights('weight').type.precision
)

+ namespace = params['namespace']
+
if node.get_attr('strategy').lower() == 'latency':
- params['dense_function'] = 'DenseLatency'
+ params['dense_function'] = 'nnet::DenseLatency'
elif node.get_attr('strategy').lower() == 'resource':
if int(params['reuse_factor']) <= int(params['n_in']):
- params['dense_function'] = 'DenseResource_rf_leq_nin'
+ params['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
else:
- params['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
+ params['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
# The 3rd case is never used
elif node.get_attr('strategy').lower() == 'resource_unrolled':
- params['dense_function'] = f'dense_resource_unrolled_{node.index}'
+ params['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}'

return self.template.format(**params)

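The strategy dispatch added here (and mirrored in the convolution and recurrent templates) reduces to the following; the helper is illustrative, not code from the commit:

```python
# Illustrative summary of the dense_function selection above. Library
# kernels keep an explicit nnet:: prefix; only the per-layer unrolled
# kernel is generated code and therefore namespace-qualified.
def select_dense_function(strategy, reuse_factor, n_in, namespace, index):
    strategy = strategy.lower()
    if strategy == 'latency':
        return 'nnet::DenseLatency'
    if strategy == 'resource':
        if reuse_factor <= n_in:
            return 'nnet::DenseResource_rf_leq_nin'
        return 'nnet::DenseResource_rf_gt_nin_rem0'  # 3rd case never used
    if strategy == 'resource_unrolled':
        return f'{namespace}::dense_resource_unrolled_{index}'
    raise ValueError(f'unknown strategy: {strategy}')
```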
20 changes: 11 additions & 9 deletions hls4ml/backends/vivado/passes/recurrent_templates.py
@@ -17,7 +17,7 @@
typedef {bias_t.name} bias_t;
typedef {weight_t.name} weight_t;
template<class data_T, class res_T, class CONFIG_T>
- using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
+ using kernel = {dense_function}<data_T, res_T, CONFIG_T>;
template<class x_T, class y_T>
using product = nnet::product::{product_type}<x_T, y_T>;
}};\n"""
@@ -141,16 +141,18 @@ def format(self, node):
mult_params1['nzeros'] = node.get_weights('weight').nzeros
mult_params1['nonzeros'] = node.get_weights('weight').nonzeros

+ namespace = params['namespace']
+
if node.get_attr('strategy').lower() == 'latency':
- mult_params1['dense_function'] = 'DenseLatency'
+ mult_params1['dense_function'] = 'nnet::DenseLatency'
elif node.get_attr('strategy').lower() == 'resource':
if int(mult_params1['reuse_factor']) <= int(mult_params1['n_in']):
- mult_params1['dense_function'] = 'DenseResource_rf_leq_nin'
+ mult_params1['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
else:
- mult_params1['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
+ mult_params1['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
# The 3rd case is never used
elif node.get_attr('strategy').lower() == 'resource_unrolled':
- mult_params1['dense_function'] = f'dense_resource_unrolled_{node.index}_1'
+ mult_params1['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}_1'

if node.get_attr('return_sequences'):
mult_params2['n_in'] = node.get_output_variable().shape[1]
@@ -167,15 +169,15 @@ def format(self, node):
mult_params2['nonzeros'] = node.get_weights('recurrent_weight').nonzeros

if node.get_attr('strategy').lower() == 'latency':
- mult_params2['dense_function'] = 'DenseLatency'
+ mult_params2['dense_function'] = 'nnet::DenseLatency'
elif node.get_attr('strategy').lower() == 'resource':
if int(mult_params2['reuse_factor']) <= int(mult_params2['n_in']):
- mult_params2['dense_function'] = 'DenseResource_rf_leq_nin'
+ mult_params2['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
else:
- mult_params2['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
+ mult_params2['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
# The 3rd case is never used
elif node.get_attr('strategy').lower() == 'resource_unrolled':
- mult_params2['dense_function'] = f'dense_resource_unrolled_{node.index}_2'
+ mult_params2['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}_2'

mult_config1 = self.mult1_template.format(**mult_params1)
mult_config2 = self.mult2_template.format(**mult_params2)
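Recurrent layers instantiate two dense kernels (input and recurrent), hence the `_1`/`_2` suffixes on the generated unrolled functions; a sketch with hypothetical values:

```python
# Hypothetical values for illustration; both generated functions move
# into the configurable namespace, suffixed per kernel instance.
namespace, index = 'myproject', 12
mult1 = f'{namespace}::dense_resource_unrolled_{index}_1'
mult2 = f'{namespace}::dense_resource_unrolled_{index}_2'
print(mult1, mult2)
```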
4 changes: 4 additions & 0 deletions hls4ml/writer/vivado_writer.py
@@ -790,6 +790,7 @@ def write_generated_code(self, model):
contents = f.readlines()
f.close()
f = open(path, 'w')
+ namespace = model.config.get_writer_config().get('Namespace', None)

for line in contents:
if '// hls4ml insert code' in line:
@@ -799,6 +800,9 @@
newline += str(generated_code)
else:
newline = line
+ if namespace is not None:
+     if 'namespace nnet' in newline:
+         newline = newline.replace('namespace nnet', f'namespace {namespace}')
f.write(newline)
f.close()

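The writer-side half of the change, sketched as a standalone function under the assumption that generated sources wrap their definitions in `namespace nnet { ... }`:

```python
# Sketch of the rewrite applied while copying generated source files:
# when a custom namespace is configured, each line's 'namespace nnet'
# declaration is renamed; with no namespace configured, lines pass
# through untouched.
def rewrite_namespace(line, namespace):
    if namespace is not None and 'namespace nnet' in line:
        return line.replace('namespace nnet', f'namespace {namespace}')
    return line

print(rewrite_namespace('namespace nnet {', 'myproject'))  # namespace myproject {
print(rewrite_namespace('namespace nnet {', None))         # unchanged
```

Taken together with the template changes above, this appears to be the point of the commit: generated per-layer code is isolated in the configurable namespace, while the static nnet library keeps its fully qualified names.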