 from hls4ml.backends import FPGABackend
 from hls4ml.backends.fpga.fpga_types import APTypeConverter, HLSTypeConverter, VivadoArrayVariableConverter
 from hls4ml.report import parse_vivado_report
+from hls4ml.utils.fixed_point_utils import ceil_log2
 
 class VivadoBackend(FPGABackend):
     def __init__(self):
@@ -129,7 +130,7 @@ def build(self, model, reset=False, csim=True, synth=True, cosim=False, validati |
 
     def _validate_conv_strategy(self, layer):
         if layer.model.config.model_strategy.lower() != 'resource':
-            print('WARNING: Cannot use "Latency" model strategy for {} layer. Switching to "Resource" strategy.')
+            print(f'WARNING: Cannot use "Latency" model strategy for {layer.name} layer. Switching to "Resource" strategy.')
             layer.model.config.model_strategy = 'Resource'
 
     @layer_optimizer(Layer)
@@ -251,6 +252,36 @@ def init_depconv2d(self, layer): |
         layer.set_attr('n_partitions', 1) #TODO Once we have SeparableConv implementation for io_parallel this should be set properly
         layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())
 
+    def _set_pooling_accum_t(self, layer, pool_size):
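+        # Widen the accumulator by ceil(log2(pool_size)) bits: the extra integer bits keep the
+        # pooled sum from overflowing, and the extra fractional bits preserve precision when
+        # the sum is averaged over pool_size.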
+        extra_bits = ceil_log2(pool_size)
+        accum_t = layer.get_attr('accum_t')
+        accum_t.precision.fractional += extra_bits
+        accum_t.precision.integer += extra_bits
+
+    @layer_optimizer(Pooling1D)
+    def init_pooling1d(self, layer):
+        pool_size = layer.get_attr('pool_width')
+        self._set_pooling_accum_t(layer, pool_size)
+
+        layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())
+
+    @layer_optimizer(Pooling2D)
+    def init_pooling2d(self, layer):
+        pool_size = layer.get_attr('pool_height') * layer.get_attr('pool_width')
+        self._set_pooling_accum_t(layer, pool_size)
+
+        layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())
+
+    @layer_optimizer(GlobalPooling1D)
+    def init_global_pooling1d(self, layer):
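+        # Global pooling reduces over the entire input, so the full n_in length is the pool size.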
+        pool_size = layer.get_attr('n_in')
+        self._set_pooling_accum_t(layer, pool_size)
+
+    @layer_optimizer(GlobalPooling2D)
+    def init_global_pooling2d(self, layer):
+        pool_size = layer.get_attr('in_height') * layer.get_attr('in_width')
+        self._set_pooling_accum_t(layer, pool_size)
+
     @layer_optimizer(Activation)
     def init_activation(self, layer):
         if 'table_t' not in layer.attributes: