Commit 3ad960f

Update on "Remove Per-Op mode from DQPartitioner"
Differential Revision: [D71427234](https://our.internmc.facebook.com/intern/diff/D71427234/) [ghstack-poisoned]
2 parents ccd41e4 + 8c8be95 commit 3ad960f

File tree

1 file changed (+14, -14)

backends/xnnpack/test/ops/test_linear.py

Lines changed: 14 additions & 14 deletions
@@ -536,21 +536,21 @@ def get_qnode_checks(quant_node_checks, dialect):
         # )
 
     def test_qd8_f32_per_channel_shared_dq_chain(self):
-        # for use_bias in (False, True):
-        module = SharedDQChain(
-            input_size=13,
-            output_size=17,
-        )
-        inputs = (torch.randn(1, 2, 13),)
+        for use_bias in (False, True):
+            module = SharedDQChain(
+                input_size=13,
+                output_size=17,
+            )
+            inputs = (torch.randn(1, 2, 13),)
 
-        self._test_dqlinear(
-            module,
-            inputs,
-            dynamic_shapes=None,
-            is_per_channel=True,
-            linear_count=2,
-            uses_bias=False,
-        )
+            self._test_dqlinear(
+                module,
+                inputs,
+                dynamic_shapes=None,
+                is_per_channel=True,
+                linear_count=2,
+                uses_bias=use_bias,
+            )
 
     def _test_qd8_per_channel_linear(self, dtype: torch.dtype = torch.float):
         for uses_bias in (False, True):
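For context, the test name and the linear_count=2 check suggest that SharedDQChain is a module whose two linear layers consume the same activation, so after dynamic quantization both linear ops hang off one shared dequantize (DQ) chain. The real class is defined elsewhere in test_linear.py; the following is only a minimal sketch under that assumption (the layer names and the summed output are illustrative, not the actual definition):

import torch

# Hypothetical sketch of a SharedDQChain-style test module: two independent
# linear layers consume the same input tensor, so after dynamic quantization
# both linear ops share a single dequantize (DQ) chain on that input, which
# is what the test's linear_count=2 expectation exercises.
class SharedDQChain(torch.nn.Module):
    def __init__(self, input_size: int, output_size: int):
        super().__init__()
        self.linear1 = torch.nn.Linear(input_size, output_size)
        self.linear2 = torch.nn.Linear(input_size, output_size)

    def forward(self, x):
        # Both linears read the same input; summing the outputs keeps a single
        # graph output while preserving the shared DQ producer.
        return self.linear1(x) + self.linear2(x)

With the change above, the test now iterates over use_bias and forwards it as uses_bias=use_bias to _test_dqlinear, so both the bias and no-bias paths are covered instead of only the uses_bias=False case.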
