We read every piece of feedback and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 67108b6 · commit ab8d4e7 — Copy full SHA for ab8d4e7
backends/qualcomm/quantizer/qconfig.py
@@ -52,7 +52,9 @@ def _derive_bias_qparams_fn(
52
act_scale, weight_scale
53
)
54
derived_scale = (broadcast_act_scale * broadcast_weight_scale).to(torch.float32)
55
- derived_zero = torch.zeros(derived_scale.size(), device=weight_zp.device).to(torch.int32)
+ derived_zero = torch.zeros(derived_scale.size(), device=weight_zp.device).to(
56
+ torch.int32
57
+ )
58
if isinstance(weight_obs_or_fq, PerBlockParamObserver):
59
# keep maximum scale of each channel for bias
60
derived_scale = (
0 commit comments