Merged
19 commits
11601cd  Added automatic inference of `param_t` constant for parametrised acti… (nghielme, Dec 3, 2024)
72026fb  pre-commit fixes (nghielme, Dec 4, 2024)
10ec7a2  Fix the case the param is a power of 2 (nghielme, Dec 4, 2024)
29f0831  Fix for a specific case related to no bits in the mantissa (nghielme, Dec 4, 2024)
ecf5c2c  Merge branch 'main' into leaky_relu_quant_alpha (nghielme, Dec 5, 2024)
49e5a75  Merge branch 'main' into leaky_relu_quant_alpha (nghielme, Dec 9, 2024)
baba0f3  Merge branch 'main' into leaky_relu_quant_alpha (JanFSchulte, Dec 16, 2024)
0808580  Merge branch 'main' into leaky_relu_quant_alpha (nghielme, Dec 19, 2024)
79f7372  Update subproject commit reference in example-models (nghielme, Jan 25, 2025)
a4f5fa5  Merge branch 'main' into leaky_relu_quant_alpha (nghielme, Jan 29, 2025)
a39b3ef  Merge remote-tracking branch 'upstream/main' into leaky_relu_quant_alpha (jmitrevs, Jul 24, 2025)
463acf7  first, untested version of constant precison (jmitrevs, Jul 25, 2025)
a8641d9  try using Fxp for precision setting (jmitrevs, Jul 26, 2025)
8442c37  fix bug in max attribute of unsigned FixedPrecisionType (jmitrevs, Jul 26, 2025)
c7a6051  add unit test for precision from constant (jmitrevs, Jul 26, 2025)
38e443c  Merge branch 'main' into leaky_relu_quant_alpha (JanFSchulte, Jul 29, 2025)
f8315d2  Merge branch 'main' into leaky_relu_quant_alpha (jmitrevs, Sep 7, 2025)
15eefe9  integrate suggested test_precision_from_constant_unit change (jmitrevs, Sep 7, 2025)
d2cdb64  Merge remote-tracking branch 'upstream/main' into leaky_relu_quant_alpha (jmitrevs, Sep 8, 2025)
45 changes: 42 additions & 3 deletions hls4ml/model/optimizer/passes/infer_precision.py
@@ -2,6 +2,7 @@
 from collections.abc import Iterable
 
 import numpy as np
+from fxpmath import Fxp
 
 from hls4ml.model.optimizer import ConfigurableOptimizerPass
 from hls4ml.model.types import (
@@ -573,9 +574,17 @@ def _infer_par_act_precision(self, node, types_to_infer):
         # For threshold relu, set the parameter precision to be the input precision by default;
         # for other parametrized activations, just allow the default precision to be used.
         # Can override these values in the configuration by explicitly setting them.
-        if 'param_t' in types_to_infer and node.get_attr('activation').lower() == 'thresholdedrelu':
-            in_type = node.get_input_variable().type.precision
-            node.attributes['param_t'].precision = in_type
+        if 'param_t' in types_to_infer:
+            if node.get_attr('activation').lower() == 'thresholdedrelu':
+                # For threshold relu, set the parameter precision to be the input precision by default;
+                in_type = node.get_input_variable().type.precision
+                node.attributes['param_t'].precision = in_type
+                inferred_types.append('param_t')
+            else:
+                # find a constant to represent the values
+                param = node.get_attr('activ_param')
+                precision = _get_precision_from_constant(param)
+                node.attributes['param_t'].precision = precision
+                inferred_types.append('param_t')
 
         return inferred_types
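Reviewer note: a minimal standalone sketch of the new `param_t` branch above, with hypothetical plain arguments standing in for the node attributes (the real pass reads them from the node, and `_get_precision_from_constant` is defined later in this file):

    # Simplified mirror of the branch logic above; not the optimizer pass itself.
    def sketch_param_precision(activation: str, activ_param, input_precision):
        if activation.lower() == 'thresholdedrelu':
            # The threshold is compared directly against the input, so reuse the input precision.
            return input_precision
        # Other parametrized activations (LeakyReLU, ELU, ...) size a constant for activ_param.
        return _get_precision_from_constant(activ_param)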
@@ -594,3 +603,33 @@ def _infer_prelu_act_precision(self, node, types_to_infer):
             inferred_types.append('param_t')
 
         return inferred_types
+
+
+def _get_precision_from_constant(value: int | float, max_width=8):
+    """A utility function to find a fixed type to store the constant
+
+    Arguments:
+        value (int or float): the constant value
+        max_width (int, optional): the maximum fixed width (+ 1 if signed). Defaults to 8
+
+    Returns:
+        FixedPrecisionType: the type to use
+    """
+    if value == 0:
+        return FixedPrecisionType(width=1, integer=1, signed=False)
+
+    signed = value < 0
+    absval = abs(value)
+    # check if power of 2
+    mantissa, exp = np.frexp(absval)
+    if mantissa == 0.5:  # is it a power of 2?
+        # One could consider returning an ExponentPrecisionType here.
+        # Decided on FixedPrecisionType everywhere since ExponentPrecisionType is less supported
+        return FixedPrecisionType(1 + signed, exp, signed)
+
+    # now is the general case. First try Fxp
+    fxpval = Fxp(value, signed=signed)
+    if isinstance(fxpval.n_word, int) and fxpval.n_word <= max_width:
+        return FixedPrecisionType(fxpval.n_word, signed + fxpval.n_int, signed)
+
+    return FixedPrecisionType(signed + max_width, signed + exp, signed)
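For orientation, a few representative results from `_get_precision_from_constant` (a sketch assuming the default max_width=8; the widths match the unit test added below, and the <W,I> comments use hls4ml's width/integer convention):

    _get_precision_from_constant(0.03125)  # power of two (2**-5): one mantissa bit -> ufixed<1,-4>
    _get_precision_from_constant(1.25)     # exactly 0b1.01, so Fxp reports n_word=3 -> ufixed<3,1>
    _get_precision_from_constant(-1.25)    # same value plus a sign bit -> fixed<4,2>
    _get_precision_from_constant(1.1)      # not exactly representable: falls back to the max_width budget -> ufixed<8,1>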
2 changes: 1 addition & 1 deletion hls4ml/model/types.py
@@ -270,7 +270,7 @@ def min(self):
 
     @property
     def max(self):
-        return 2.0 ** (self.integer - 1) - 2.0**-self.fractional
+        return 2.0 ** (self.integer - self.signed) - 2.0**-self.fractional
 
 
 class XnorPrecisionType(PrecisionType):
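Context for the one-line change to FixedPrecisionType.max: the previous formula always subtracted a sign bit, which understated the range of unsigned types. A quick sanity check of the corrected expression (standalone sketch, not the hls4ml class itself; fractional = width - integer as in FixedPrecisionType):

    def fixed_max(width, integer, signed):
        fractional = width - integer
        return 2.0 ** (integer - signed) - 2.0 ** -fractional

    fixed_max(8, 4, signed=True)   # 7.9375, unchanged by the fix
    fixed_max(8, 4, signed=False)  # 15.9375; the old formula reported 7.9375 here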
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -25,7 +25,7 @@ classifiers = [
   "Topic :: Software Development :: Libraries :: Python Modules",
 ]
 dynamic = [ "version" ]
-dependencies = [ "h5py", "numpy", "pydigitalwavetools==1.1", "pyyaml", "quantizers" ]
+dependencies = [ "fxpmath", "h5py", "numpy", "pydigitalwavetools==1.1", "pyyaml", "quantizers" ]
 
 optional-dependencies.da = [ "da4ml>=0.2.1,<=0.4" ]
 optional-dependencies.doc = [
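The new fxpmath dependency is what performs the minimal-size search in `_get_precision_from_constant`. If I read its auto-sizing correctly, constructing an Fxp from a value without specifying sizes yields the smallest word that represents the value exactly, for example:

    from fxpmath import Fxp

    x = Fxp(1.25, signed=False)
    x.n_word, x.n_int, x.n_frac  # (3, 1, 2) for 0b1.01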
29 changes: 29 additions & 0 deletions test/pytest/test_auto_precision.py
@@ -17,6 +17,7 @@
 from tensorflow.keras.models import Sequential
 
 import hls4ml
+from hls4ml.model.optimizer.passes.infer_precision import _get_precision_from_constant
 
 test_root_path = Path(__file__).parent
 
@@ -254,3 +255,31 @@ def test_auto_precision_dense(keras_model_dense, data_1d, io_type, backend):
     y_keras = model.predict(data).flatten()
     y_hls = hls_model.predict(data).flatten()
     np.testing.assert_allclose(y_keras, y_hls, rtol=2e-2, atol=5e-2, verbose=True)
+
+
+@pytest.mark.parametrize(
+    "val, expected_width",
+    [
+        (0, 1),
+        (-1024, 2),
+        (1024, 1),
+        (0.03125, 1),
+        (-0.03125, 2),
+        (1.25, 3),
+        (-1.25, 4),
+        (1.1, 8),
+        (-1.1, 9),
+    ],
+)
+def test_precision_from_constant_unit(val, expected_width):
+    """Test determining precision needed for a constant."""
+    max_width = 8
+    fp = _get_precision_from_constant(val, max_width)
+
+    assert fp.min <= val <= fp.max
+    assert fp.width == expected_width
+    assert fp.signed == (val < 0)
+
+    quantum = 2.0**-fp.fractional
+    if expected_width < max_width:
+        assert val % quantum == 0
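A short note on where the expected widths come from (reasoning only, no extra test logic): 1024 = 2**10 is a power of two, so one mantissa bit suffices (width 1) and -1024 adds a sign bit (width 2); 1.25 = 0b1.01 needs 3 bits unsigned and 4 signed; 1.1 has no finite binary expansion, so it falls back to the max_width=8 budget (9 with the sign bit), and the final quantum check is skipped for those rows because expected_width < max_width no longer holds. A minimal reproduction of the power-of-two case:

    import numpy as np
    np.frexp(1024.0)  # (0.5, 11): mantissa 0.5 flags a power of two; the exponent sets the integer part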