-import hls4ml
 import numpy as np
+
+import hls4ml
+from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType
 from hls4ml.optimization.config import SUPPORTED_STRUCTURES
 from hls4ml.optimization.keras.config import SUPPORTED_LAYERS
-from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType
+

 class hls4mlAttributes:
     '''
@@ -18,19 +20,22 @@ class hls4mlAttributes:
         - reuse_factor (int): Layer reuse factor
         - parallelization_factor (int): Layer parallelization factor - [applicable to io_parallel Conv2D]
     '''
-    def __init__(self, n_in, n_out, io_type, strategy, weight_precision, output_precision, reuse_factor, parallelization_factor=1):
+
+    def __init__(
+        self, n_in, n_out, io_type, strategy, weight_precision, output_precision, reuse_factor, parallelization_factor=1
+    ):
         if not isinstance(weight_precision, (FixedPrecisionType, IntegerPrecisionType)):
             raise Exception('Layer weight precision is not in valid format')
-
+
         if not isinstance(output_precision, (FixedPrecisionType, IntegerPrecisionType)):
             raise Exception('Layer output precision is not in valid format')
 
         if strategy not in ('Latency', 'latency', 'Resource', 'resource'):
             raise Exception('Unknown layer strategy')
-
+
         if io_type not in ('io_parallel', 'io_stream'):
             raise Exception('Unknown IO type')
-
+
         self.n_in = n_in
         self.n_out = n_out
         self.io_type = io_type
@@ -40,37 +45,48 @@ def __init__(self, n_in, n_out, io_type, strategy, weight_precision, output_prec
         self.reuse_factor = reuse_factor
         self.parallelization_factor = parallelization_factor

+
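
For orientation (an editor-added sketch, not part of the commit): a minimal example of how hls4mlAttributes might be instantiated for a hypothetical 16x32 Dense layer, using the precision types imported above.

example_attrs = hls4mlAttributes(
    n_in=16,
    n_out=32,
    io_type='io_parallel',
    strategy='Resource',
    weight_precision=FixedPrecisionType(width=16, integer=6),
    output_precision=FixedPrecisionType(width=16, integer=6),
    reuse_factor=4,
    parallelization_factor=1,
)
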
 class OptimizationAttributes:
     '''
     A class for storing layer optimization attributes
-
+
     Args:
         - structure_type (enum): Targeted structure - unstructured, structured, pattern, block
         - pruning (boolean): Should pruning be applied to the layer
         - weight_sharing (boolean): Should weight sharing be applied to the layer
         - block_shape (tuple): Block shape if structure_type == block
-        - pattern_offset (int): Length of each pattern if structure_type == pattern
-        - consecutive_patterns (int): How many consecutive patterns are grouped together if structure_type == pattern
-
+        - pattern_offset (int): Length of each pattern if structure_type == pattern
+        - consecutive_patterns (int): How many consecutive patterns are grouped together if structure_type == pattern
+
     Notes:
         - In the case of hls4ml, pattern_offset is equivalent to the number of weights processed in parallel
         - The pattern_offset is n_in * n_out / reuse_factor; default case (=1) is equivalent to no unrolling
     '''
-    def __init__(self, structure_type=SUPPORTED_STRUCTURES.UNSTRUCTURED, pruning=False, weight_sharing=False, block_shape=(1, 1), pattern_offset=1, consecutive_patterns=1):
+
+    def __init__(
+        self,
+        structure_type=SUPPORTED_STRUCTURES.UNSTRUCTURED,
+        pruning=False,
+        weight_sharing=False,
+        block_shape=(1, 1),
+        pattern_offset=1,
+        consecutive_patterns=1,
+    ):
         if not isinstance(structure_type, SUPPORTED_STRUCTURES):
             raise Exception(f'{self.__class__.__name__} unknown structure type')
-
+
         self.structure_type = structure_type
         self.pruning = pruning
         self.weight_sharing = weight_sharing
         self.block_shape = block_shape
         self.pattern_offset = pattern_offset
         self.consecutive_patterns = consecutive_patterns

+
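
To make the Notes concrete (an editor-added sketch, not part of the commit): for the hypothetical 16x32 Dense layer with reuse_factor = 4, pattern_offset = n_in * n_out / reuse_factor = 16 * 32 / 4 = 128. A PATTERN member of SUPPORTED_STRUCTURES is assumed here, matching the structures listed in the Args above.

pattern_attrs = OptimizationAttributes(
    structure_type=SUPPORTED_STRUCTURES.PATTERN,  # assumed enum member, per the docstring
    pruning=True,
    weight_sharing=False,
    pattern_offset=128,
    consecutive_patterns=1,
)
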
 class LayerAttributes:
     '''
     A class for storing layer information
-
+
     Args:
         - name (string): Layer name
         - layer_type (keras.Layer): Layer type (e.g. Dense, Conv2D etc.)
@@ -82,77 +98,99 @@ class LayerAttributes:
         - optimization_attributes (OptimizationAttributes): Type of optimization, pruning vs weight sharing, block shape and pattern offset
         - args (dict): Additional information, e.g. hls4mlAttributes; dictionary so it can be generic enough for different platforms
     '''
-    def __init__(self, name, layer_type, inbound_layers, weight_shape, input_shape, output_shape, optimizable, optimization_attributes, args):
+
+    def __init__(
+        self,
+        name,
+        layer_type,
+        inbound_layers,
+        weight_shape,
+        input_shape,
+        output_shape,
+        optimizable,
+        optimization_attributes,
+        args,
+    ):
         self.name = name
         self.layer_type = layer_type
         self.inbound_layers = inbound_layers
         self.weight_shape = weight_shape
         self.input_shape = input_shape
         self.output_shape = output_shape
         self.optimizable = optimizable
-        self.optimization_attributes = optimization_attributes
+        self.optimization_attributes = optimization_attributes
         self.args = args

     def update_args(self, updates):
         self.args.update(updates)

     def __str__(self):
-        return f'name: {self.name}, ' \
-            f'layer_type: {self.layer_type}, ' \
-            f'inbound_layers: {self.inbound_layers}, ' \
-            f'weight_shape: {self.weight_shape}, ' \
-            f'input_shape: {self.input_shape}, ' \
-            f'output_shape: {self.output_shape}, ' \
-            f'optimizable: {self.optimizable}, ' \
-            f'optimization_attributes: {self.optimization_attributes}, ' \
-            f'args: {self.args}, ' \
+        return (
+            f'name: {self.name}, '
+            f'layer_type: {self.layer_type}, '
+            f'inbound_layers: {self.inbound_layers}, '
+            f'weight_shape: {self.weight_shape}, '
+            f'input_shape: {self.input_shape}, '
+            f'output_shape: {self.output_shape}, '
+            f'optimizable: {self.optimizable}, '
+            f'optimization_attributes: {self.optimization_attributes}, '
+            f'args: {self.args}, '
+        )
+

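
A hedged sketch (editor-added, not part of the commit) of how a LayerAttributes record is built and later extended through update_args(); every value below is hypothetical.

import tensorflow as tf

dense_attrs = LayerAttributes(
    name='dense_1',
    layer_type=tf.keras.layers.Dense,
    inbound_layers=[],
    weight_shape=(16, 32),
    input_shape=(16,),
    output_shape=(32,),
    optimizable=False,
    optimization_attributes=OptimizationAttributes(),
    args={},
)
dense_attrs.update_args({'hls4ml_attributes': example_attrs})  # example_attrs from the first sketch above
print(dense_attrs)
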
 def get_attributes_from_keras_model(model):
     '''
     Given a Keras model, builds a dictionary of class attributes
     Additional arguments (e.g. reuse factor) depend on the target hardware platform and are inserted later
     Per-layer pruning type (structured, pattern etc.) depends on the pruning objective and is inserted later
-
+
     Args:
         - model (keras.model): Model to extract attributes from

     Return:
-        - model_attributes (dict): Each key corresponds to a layer name, values are instances of LayerAttributes
+        - model_attributes (dict): Each key corresponds to a layer name, values are instances of LayerAttributes
     '''
-    is_sequential = model.__class__.__name__ == 'Sequential'
+    is_sequential = model.__class__.__name__ == 'Sequential'
     model_attributes = {}

     for i, layer in enumerate(model.layers):
         inbound_layers = []
         if is_sequential and i > 0:
-            inbound_layers.append(model.layers[i - 1])
+            inbound_layers.append(model.layers[i - 1])
         elif not is_sequential:
             nodes = model.get_config()['layers'][i]['inbound_nodes']
             if len(nodes) > 0:
                 inbound_layers.extend(node[0] for node in nodes[0])  # extend rather than append: append would store a generator object
-
+
         layer_weights = layer.get_weights()
         weight_shape = layer_weights[0].shape if len(layer_weights) > 0 else ()

         model_attributes[layer.name] = LayerAttributes(
-            layer.name, layer.__class__, inbound_layers,
-            weight_shape, layer.input_shape[1:], layer.output_shape[1:],
-            False, OptimizationAttributes(), {}
+            layer.name,
+            layer.__class__,
+            inbound_layers,
+            weight_shape,
+            layer.input_shape[1:],
+            layer.output_shape[1:],
+            False,
+            OptimizationAttributes(),
+            {},
         )
-
+
     return model_attributes

+
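
A usage sketch (editor-added, not part of the commit), assuming a small Sequential model; layer names and sizes are hypothetical.

import tensorflow as tf

model = tf.keras.Sequential(
    [
        tf.keras.layers.Dense(32, input_shape=(16,), name='dense_1'),
        tf.keras.layers.Dense(8, name='dense_2'),
    ]
)
attributes = get_attributes_from_keras_model(model)
print(attributes['dense_1'])  # weight_shape=(16, 32); optimizable stays False until an objective marks the layer
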
 def get_attributes_from_keras_model_and_hls4ml_config(model, config):
     '''
     Given a Keras model and hls4ml configuration, builds a dictionary of class attributes
     Per-layer pruning type (structured, pattern etc.) depends on the pruning objective and is inserted later
-
+
     Args:
         - model (keras.model): Model to extract attributes from
-        - config (dict): hls4ml configuration dictionary
-
+        - config (dict): hls4ml configuration dictionary
+
     Return:
-        - model_attributes (dict): Each key corresponds to a layer name, values are LayerAttributes instances
+        - model_attributes (dict): Each key corresponds to a layer name, values are LayerAttributes instances
     '''

     # Extract Keras attributes
@@ -163,30 +201,32 @@ def get_attributes_from_keras_model_and_hls4ml_config(model, config):
     default_reuse_factor = config['Model']['ReuseFactor']
     default_strategy = config['Model']['Strategy']
     default_precision = config['Model']['Precision']
-
+
     # Build dictionary
     for layer in model_attributes:
         if model_attributes[layer].layer_type in SUPPORTED_LAYERS:
             n_in, n_out = __get_layer_mult_size(model_attributes[layer])
             layer_config = config['LayerName'][layer] if layer in config['LayerName'] else {}
-            reuse_factor = layer_config['ReuseFactor'] if 'ReuseFactor' in layer_config else default_reuse_factor
-            parallelization_factor = layer_config['ParallelizationFactor'] if 'ParallelizationFactor' in layer_config else 1
+            reuse_factor = layer_config['ReuseFactor'] if 'ReuseFactor' in layer_config else default_reuse_factor
+            parallelization_factor = layer_config['ParallelizationFactor'] if 'ParallelizationFactor' in layer_config else 1
             strategy = layer_config['Strategy'] if 'Strategy' in layer_config else default_strategy
-            weight_precision = layer_config['Precision']['weight'] if 'weight' in layer_config['Precision'] else default_precision
+            weight_precision = (
+                layer_config['Precision']['weight'] if 'weight' in layer_config['Precision'] else default_precision
+            )
             weight_precision = hls4ml.backends.fpga.fpga_backend.FPGABackend.convert_precision_string(weight_precision)
-            output_precision = layer_config['Precision']['result'] if 'result' in layer_config['Precision'] else default_precision
+            output_precision = (
+                layer_config['Precision']['result'] if 'result' in layer_config['Precision'] else default_precision
+            )
             output_precision = hls4ml.backends.fpga.fpga_backend.FPGABackend.convert_precision_string(output_precision)
-
+
             hls4ml_attributes = hls4mlAttributes(
-                n_in, n_out,
-                io_type, strategy,
-                weight_precision, output_precision,
-                reuse_factor, parallelization_factor
+                n_in, n_out, io_type, strategy, weight_precision, output_precision, reuse_factor, parallelization_factor
             )
             model_attributes[layer].update_args({'hls4ml_attributes': hls4ml_attributes})
-
+
     return model_attributes

+
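
A final sketch (editor-added, not part of the commit) pairing the Sequential model from the previous example with a minimal hls4ml-style config. Only the keys read above are filled in; the 'IOType' key is an assumption (io_type is resolved outside the hunks shown), and the precision strings use the 'ap_fixed<W,I>' form accepted by convert_precision_string.

config = {
    'IOType': 'io_parallel',  # assumed key; not visible in the hunks above
    'Model': {'ReuseFactor': 4, 'Strategy': 'Resource', 'Precision': 'ap_fixed<16,6>'},
    'LayerName': {
        'dense_1': {'ReuseFactor': 8, 'Strategy': 'Resource', 'Precision': {'weight': 'ap_fixed<8,3>'}},
        'dense_2': {'Precision': {}},  # falls back to the model-level defaults
    },
}
model_attributes = get_attributes_from_keras_model_and_hls4ml_config(model, config)
print(model_attributes['dense_1'].args['hls4ml_attributes'].reuse_factor)  # -> 8
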
 def __get_layer_mult_size(attributes):
     '''
     Helper function to calculate layer multiplication size