@@ -97,13 +97,26 @@ def is_layer_optimizable(self, layer_attributes):
9797 def layer_resources (self , layer_attributes ):
9898 if not layer_attributes .weight_shape :
9999 return [0 ]
100- else :
101- # TOOD - Extend for parallelisation factor
102- # For RF > 1, BRAM utilised by weights can be estimated by (bit_width * n_in * n_out) / (RF * 36)
100+
101+ # TODO - Extend for parallelisation factor
102+ if layer_attributes . args [ 'hls4ml_attributes' ]. strategy . lower () == 'latency' :
103103 return [
104104 int (np .prod (layer_attributes .weight_shape ) // layer_attributes .args ['hls4ml_attributes' ].reuse_factor ),
105- int ( math . ceil ( np . prod ( layer_attributes . weight_shape ) * layer_attributes . args [ 'hls4ml_attributes' ]. weight_precision . width / ( layer_attributes . args [ 'hls4ml_attributes' ]. reuse_factor * 36 ))) ,
105+ 0 ,
106106 ]
107+ else :
108+ # Resource strategy, RF == 1 is similar to Latency strategy (but slower)
109+ if layer_attributes .args ['hls4ml_attributes' ].reuse_factor == 1 :
110+ return [
111+ int (np .prod (layer_attributes .weight_shape ) // layer_attributes .args ['hls4ml_attributes' ].reuse_factor ),
112+ 0 ,
113+ ]
114+ else :
115+ # For RF > 1, BRAM utilised by weights can be estimated by (bit_width * n_in * n_out) / (RF * 36)
116+ return [
117+ int (np .prod (layer_attributes .weight_shape ) // layer_attributes .args ['hls4ml_attributes' ].reuse_factor ),
118+ int (math .ceil (np .prod (layer_attributes .weight_shape ) * layer_attributes .args ['hls4ml_attributes' ].weight_precision .width / (layer_attributes .args ['hls4ml_attributes' ].reuse_factor * 36 ))),
119+ ]
107120
108121 @classmethod
109122 def layer_savings (self , layer_attributes ):
@@ -122,9 +135,9 @@ def layer_savings(self, layer_attributes):
122135 structure_type = layer_attributes .optimization_attributes .structure_type
123136 if layer_attributes .args ['hls4ml_attributes' ].strategy .lower () == 'latency' :
124137 if layer_attributes .args ['hls4ml_attributes' ].reuse_factor == 1 :
125- return [1 ]
138+ return [1 , 0 ]
126139 else :
127- return [0 ]
140+ return [0 , 0 ]
128141 else :
129142 if layer_attributes .args ['hls4ml_attributes' ].strategy .lower () == 'resource' and layer_attributes .args ['hls4ml_attributes' ].reuse_factor == 1 :
130143 return [1 , 0 ]
@@ -244,7 +257,6 @@ def layer_savings(self, layer_attributes):
244257 if structure_type == SUPPORTED_STRUCTURES .UNSTRUCTURED :
245258 return [layer_attributes .args ['hls4ml_attributes' ].weight_precision .width ]
246259 elif structure_type == SUPPORTED_STRUCTURES .STRUCTURED :
247- print ('here' )
248260 return [
249261 layer_attributes .args ['hls4ml_attributes' ].n_in * layer_attributes .args ['hls4ml_attributes' ].weight_precision .width +
250262 layer_attributes .args ['hls4ml_attributes' ].output_precision .width
0 commit comments