Merged
Commits (19)
11601cd  Added automatic inference of `param_t` constant for parametrised acti… (nghielme, Dec 3, 2024)
72026fb  pre-commit fixes (nghielme, Dec 4, 2024)
10ec7a2  Fix the case the param is a power of 2 (nghielme, Dec 4, 2024)
29f0831  Fix for a specific case related to no bits in the mantissa (nghielme, Dec 4, 2024)
ecf5c2c  Merge branch 'main' into leaky_relu_quant_alpha (nghielme, Dec 5, 2024)
49e5a75  Merge branch 'main' into leaky_relu_quant_alpha (nghielme, Dec 9, 2024)
baba0f3  Merge branch 'main' into leaky_relu_quant_alpha (JanFSchulte, Dec 16, 2024)
0808580  Merge branch 'main' into leaky_relu_quant_alpha (nghielme, Dec 19, 2024)
79f7372  Update subproject commit reference in example-models (nghielme, Jan 25, 2025)
a4f5fa5  Merge branch 'main' into leaky_relu_quant_alpha (nghielme, Jan 29, 2025)
a39b3ef  Merge remote-tracking branch 'upstream/main' into leaky_relu_quant_alpha (jmitrevs, Jul 24, 2025)
463acf7  first, untested version of constant precison (jmitrevs, Jul 25, 2025)
a8641d9  try using Fxp for precision setting (jmitrevs, Jul 26, 2025)
8442c37  fix bug in max attribute of unsigned FixedPrecisionType (jmitrevs, Jul 26, 2025)
c7a6051  add unit test for precision from constant (jmitrevs, Jul 26, 2025)
38e443c  Merge branch 'main' into leaky_relu_quant_alpha (JanFSchulte, Jul 29, 2025)
f8315d2  Merge branch 'main' into leaky_relu_quant_alpha (jmitrevs, Sep 7, 2025)
15eefe9  integrate suggested test_precision_from_constant_unit change (jmitrevs, Sep 7, 2025)
d2cdb64  Merge remote-tracking branch 'upstream/main' into leaky_relu_quant_alpha (jmitrevs, Sep 8, 2025)
hls4ml/model/optimizer/passes/infer_precision.py: 36 changes (28 additions, 8 deletions)
@@ -1,4 +1,5 @@
 import math
+import struct
 from typing import Iterable
 
 import numpy as np
@@ -561,15 +562,34 @@ def _infer_rnn_precision(self, node, types_to_infer):
 
         return inferred_types
 
-    def _infer_par_act_precision(self, node, types_to_infer):
+    def _infer_const_precision(self, node, type_to_infer, attr_name):
         inferred_types = []
 
-        # For threshold relu, set the parameter precision to be the input precision by default;
-        # for other parametrized activations, just allow the default precision to be used.
-        # Can override these values in the configuration by explicitly setting them.
-        if 'param_t' in inferred_types and self.get_attr('activation').lower() == 'thresholdedrelu':
-            in_type = node.get_input_variable().type.precision
-            node.attributes['param_t'].type = in_type
-            inferred_types.append('param_t')
+        def get_man_exp(f):
+            f = np.abs(f)
+            s = struct.pack('>f', f)  # IEEE 754 single-precision encoding of |f|
+            l_float = struct.unpack('>l', s)[0]
+            bits = f'{l_float:032b}'
+            m = bits[-23:]  # 23-bit mantissa
+            e = bits[-23 - 8 : -23]  # 8-bit exponent
+            return m, e
+
+        param = node.get_attr(attr_name)
+        m, e = get_man_exp(param)
+        I_pos = int(e, 2) - 127 + 1  # subtract the exponent bias (127); +1 to turn the exponent into an integer-bit count
+        try:
+            W_bits = m.rindex('1') + 2  # +1 because rindex is zero-based, +1 for the implicit leading 1 of the mantissa
+        except ValueError:
+            W_bits = 1  # the value is a power of 2, so a single bit suffices; I_pos places it correctly
+        if param < 0 and W_bits > 1:  # negative powers of 2 do not need the extra sign bit
+            I_pos += 1
+            W_bits += 1
+        node.attributes[type_to_infer].precision = FixedPrecisionType(W_bits, I_pos, param < 0)
+        inferred_types.append(type_to_infer)
+        return inferred_types
+
+    def _infer_par_act_precision(self, node, types_to_infer):
+        inferred_types = []
+        if 'param_t' in types_to_infer:
+            inferred_types.extend(self._infer_const_precision(node, 'param_t', 'activ_param'))
         return inferred_types
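
For reference, here is a standalone sketch (not part of the diff) that reproduces the bit-level logic of the new _infer_const_precision pass, so the inferred width, integer bits and signedness can be checked on a few example activation parameters. The function name infer_fixed_precision and the sample values are illustrative only.

import struct

def infer_fixed_precision(param):
    # Smallest fixed-point (width, integer bits, signed) that represents `param`
    # exactly, mirroring the mantissa/exponent extraction used in the PR.
    bits = f'{struct.unpack(">l", struct.pack(">f", abs(param)))[0]:032b}'
    mantissa, exponent = bits[-23:], bits[-31:-23]
    integer_bits = int(exponent, 2) - 127 + 1  # exponent (bias 127) plus one
    try:
        width = mantissa.rindex('1') + 2  # lowest set mantissa bit + implicit leading 1
    except ValueError:
        width = 1  # power of two: a single bit suffices
    if param < 0 and width > 1:  # non-power-of-2 negatives need a sign bit
        integer_bits += 1
        width += 1
    return width, integer_bits, param < 0

for alpha in (0.25, 0.625, -1.5):
    print(alpha, infer_fixed_precision(alpha))
# expected: 0.25 -> (1, -1, False), 0.625 -> (3, 0, False), -1.5 -> (3, 2, True)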