2 | 2 | from collections.abc import Iterable
3 | 3 |
4 | 4 | import numpy as np
5 | | -from fxpmath import Fxp |
6 | 5 |
7 | 6 | from hls4ml.model.optimizer import ConfigurableOptimizerPass
| 7 | +from hls4ml.model.optimizer.passes.bit_exact import minimal_kif
8 | 8 | from hls4ml.model.types import (
9 | 9 |     FixedPrecisionType,
10 | 10 |     IntegerPrecisionType,
@@ -618,18 +618,6 @@ def _get_precision_from_constant(value: int | float, max_width=8):
618 | 618 |     if value == 0:
619 | 619 |         return FixedPrecisionType(width=1, integer=1, signed=False)
620 | 620 |
621 | | -    signed = value < 0
622 | | -    absval = abs(value)
623 | | -    # check if power of 2
624 | | -    mantissa, exp = np.frexp(absval)
625 | | -    if mantissa == 0.5:  # is it a power of 2?
626 | | -        # One could consider returning an ExponentPrecisionType here.
627 | | -        # Decided on FixedPrecisionType everywhere since ExponentPrecisionType is less supported
628 | | -        return FixedPrecisionType(1 + signed, exp, signed)
629 | | -
630 | | -    # now is the general case. First try Fxp
631 | | -    fxpval = Fxp(value, signed=signed)
632 | | -    if isinstance(fxpval.n_word, int) and fxpval.n_word <= max_width:
633 | | -        return FixedPrecisionType(fxpval.n_word, signed + fxpval.n_int, signed)
634 | | -
635 | | -    return FixedPrecisionType(signed + max_width, signed + exp, signed)
| 621 | +    signed, integer, fraction = map(int, minimal_kif(np.array(value)))  # minimal (sign, integer, fraction) bit counts
| 622 | +    width = min(signed + integer + fraction, signed + max_width)  # cap total width at max_width plus the sign bit
| 623 | +    return FixedPrecisionType(width, signed + integer, bool(signed))
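
For context: the replacement derives the smallest (sign, integer, fraction) bit counts that represent the constant exactly via minimal_kif, then caps the total width at max_width. A minimal sketch of the resulting behavior (assuming minimal_kif returns those three counts for a scalar array, as the new code implies; the standalone wrapper below is illustrative, not part of the patch):

    import numpy as np

    from hls4ml.model.optimizer.passes.bit_exact import minimal_kif
    from hls4ml.model.types import FixedPrecisionType

    def precision_from_constant(value, max_width=8):
        # Mirrors the patched helper body from the diff above.
        if value == 0:
            return FixedPrecisionType(width=1, integer=1, signed=False)
        signed, integer, fraction = map(int, minimal_kif(np.array(value)))
        width = min(signed + integer + fraction, signed + max_width)
        return FixedPrecisionType(width, signed + integer, bool(signed))

    # 0.75 = 0b0.11 needs two fractional bits -> width 2, integer 0, unsigned
    # -3 needs a sign bit plus two integer bits -> width 3, integer 3, signed
    # 1/3 is not exactly representable, so its width is capped at max_width
    for v in (0.75, -3, 1 / 3):
        print(v, precision_from_constant(v))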