Merged
24 commits
5ee84ee  Setup: add venv/ to .gitignore  (Nov 2, 2025)
1e28b1c  Feat (graph/quantize): add QuantHardSwish support to layerwise quanti…  (Nov 2, 2025)
10196c3  Feat (graph/flexml): add QuantHardSwish support for FlexML target  (Nov 2, 2025)
b6bf19c  Feat (nn): export QuantHardSwish in public API  (Nov 2, 2025)
7f7d5a4  Feat (nn): add QuantHardSwish quantized activation layer  (Nov 2, 2025)
8f498e8  Feat (quant_tensor): add F.hardswish handler for QuantTensor  (Nov 2, 2025)
ea1e7f1  Test (nn): add comprehensive tests for QuantHardSwish  (Nov 2, 2025)
dedd117  Fix: trailing whitespace and end-of-file issues  (Nov 2, 2025)
1a9e8ba  Merge pull request #1 from surajkarki66/feat/hardswish  (surajkarki66, Nov 2, 2025)
bb2d554  Fix: remove unsupported HardSwish from FlexML target  (Nov 3, 2025)
2e3cc0f  feat: add QuantHardSwish with signed quantization for baseline  (Nov 3, 2025)
dc09244  Merge pull request #2 from surajkarki66/feat/hardswish  (surajkarki66, Nov 3, 2025)
7d23f14  fix: change act_quant to Uint8ActPerTensorFloat  (Nov 3, 2025)
c661049  Merge pull request #3 from surajkarki66/feat/hardswish  (surajkarki66, Nov 3, 2025)
c661503  chore: remove venv/  (Nov 4, 2025)
af266e2  fix: remove scale/zp preservation  (Nov 4, 2025)
4529585  Merge pull request #4 from surajkarki66/feat/hardswish  (surajkarki66, Nov 4, 2025)
f40a222  Remove unused function  (Giuseppe5, Nov 5, 2025)
be089d7  chore: remove unused function  (Nov 5, 2025)
baae4de  Merge branch 'Xilinx:dev' into dev  (surajkarki66, Nov 5, 2025)
37999cc  Merge pull request #5 from surajkarki66/feat/hardswish  (surajkarki66, Nov 5, 2025)
25e6bc5  chore: Update QuantHardSwish docstring to reflect unsigned quantization  (Nov 6, 2025)
a698da6  fix: Fix QuantHardSwish test to match unsigned quantization behavior  (Nov 6, 2025)
0723995  Merge pull request #6 from surajkarki66/feat/hardswish  (surajkarki66, Nov 6, 2025)
1 change: 1 addition & 0 deletions src/brevitas/nn/__init__.py
@@ -4,6 +4,7 @@
from .hadamard_classifier import HadamardClassifier
from .quant_accumulator import ClampQuantAccumulator
from .quant_accumulator import TruncQuantAccumulator
from .quant_activation import QuantHardSwish
from .quant_activation import QuantHardTanh
from .quant_activation import QuantIdentity
from .quant_activation import QuantReLU
31 changes: 31 additions & 0 deletions src/brevitas/nn/quant_activation.py
@@ -118,3 +118,34 @@ def __init__(
act_quant=act_quant,
return_quant_tensor=return_quant_tensor,
**kwargs)


class QuantHardSwish(QuantNLAL):
"""
Quantized HardSwish activation.

This is a baseline implementation using unsigned quantization (Uint8ActPerTensorFloat).
The HardSwish output range is [-0.375, ∞), with its minimum of -3/8 reached at x = -1.5;
unsigned quantization clamps these small negative values to zero, which is acceptable
for most use cases.

For hardware-specific optimizations, consider:
- Asymmetric quantization with learned zero-point for better range utilization
- Hardware-specific quantization schemes that match your deployment target

Users should validate this implementation works for their specific use case.
"""

def __init__(
self,
act_quant: Optional[ActQuantType] = Uint8ActPerTensorFloat,
input_quant: Optional[ActQuantType] = None,
return_quant_tensor: bool = False,
**kwargs):
QuantNLAL.__init__(
self,
act_impl=nn.Hardswish,
passthrough_act=False,
input_quant=input_quant,
act_quant=act_quant,
return_quant_tensor=return_quant_tensor,
**kwargs)
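
For orientation, here is a minimal usage sketch of the new layer. It is not part of the diff; it assumes the public API exported above, the default Uint8ActPerTensorFloat quantizer, and the standard Brevitas QuantTensor fields (value, scale, zero_point, bit_width):

import torch
from brevitas.nn import QuantHardSwish

# Default: unsigned 8-bit per-tensor activation quantization.
act = QuantHardSwish(return_quant_tensor=True)
act.eval()

x = torch.randn(1, 20, 10, 10)
y = act(x)

# Unsigned quantization clamps HardSwish's small negative lobe
# (minimum -0.375 at x = -1.5) to zero.
assert (y.value >= 0).all()

# A custom bit width is forwarded to the quantizer, as in the tests below.
act4 = QuantHardSwish(bit_width=4)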
46 changes: 46 additions & 0 deletions tests/brevitas/nn/test_act.py
@@ -4,6 +4,7 @@
import pytest
import torch

from brevitas.nn import QuantHardSwish
from brevitas.nn import QuantHardTanh
from brevitas.nn import QuantIdentity
from brevitas.nn import QuantReLU
@@ -24,6 +25,51 @@ def test_module_init_const_scaling(self):
mod = QuantReLU(max_val=6, scaling_impl_type='CONST')


class TestQuantHardSwish:

def test_module_init_default(self):
mod = QuantHardSwish()

def test_module_init_with_bit_width(self):
# Test with custom bit width
mod = QuantHardSwish(bit_width=4)

def test_forward_pass(self):
mod = QuantHardSwish()
inp = torch.randn(1, 20, 10, 10)
out = mod(inp)
assert out.shape == inp.shape

def test_output_behavior(self):
mod = QuantHardSwish()
mod.eval()
# For large positive inputs, hardswish output should be close to input (positive)
inp_positive = torch.tensor([5.0, 10.0, 100.0])
out_positive = mod(inp_positive)
assert (out_positive >= 0).all().item()

# For small negative inputs, unquantized hardswish produces small negative values:
# HardSwish reaches its minimum of -0.375 at x = -1.5
# With unsigned quantization (default), these small negative values are clamped to zero
inp_negative = torch.tensor([-2.0, -1.0, -0.5])
out_negative = mod(inp_negative)
assert (out_negative >= 0).all().item()

def test_training_eval_modes(self):
mod = QuantHardSwish()
inp = torch.randn(2, 6, 16, 16)

# Training mode
mod.train()
out_train = mod(inp)
assert out_train.shape == inp.shape

# Eval mode
mod.eval()
out_eval = mod(inp)
assert out_eval.shape == inp.shape


class TestQuantDelay:

@pytest.mark.parametrize("bw_quant_type", [(4, "INT"), (1, "BINARY"), (2, "TERNARY")])
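
As a quick sanity check of the comment in test_output_behavior (again, not part of the PR), the HardSwish minimum can be computed directly:

import torch
import torch.nn.functional as F

# hardswish(x) = x * relu6(x + 3) / 6; its minimum lies at x = -1.5.
x = torch.tensor(-1.5)
print(F.hardswish(x))  # tensor(-0.3750)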