@@ -243,15 +243,14 @@ def test_qs8_conv2d_test(self) -> None:
             self._test(
                 Conv2d(bias=has_bias, transpose=transpose),
                 quant_config=get_symmetric_quantization_config(),
-                check_quantized=not transpose,  # XNNPackQuantizer does not quantize this pattern yet
             )
 
     def test_qs8_conv2d_per_channel(self) -> None:
         for transpose in (True, False):
             self._test(
                 Conv2d(transpose=transpose),
                 quant_config=get_symmetric_quantization_config(is_per_channel=True),
-                check_quantized=not transpose,  # XNNPackQuantizer does not quantize this pattern yet
+                delegated=not transpose,  # XNNPACK does not support per input channel quantization for transpose convolutions with groups > 1
             )
 
     def test_fp32_conv2d_seq(self) -> None:
@@ -264,7 +263,6 @@ def test_qs8_conv2d_seq(self) -> None:
                 Conv2dSeq(transpose=transpose),
                 conv_count=2,
                 quant_config=get_symmetric_quantization_config(),
-                check_quantized=not transpose,  # XNNPackQuantizer does not quantize this pattern yet
             )
 
     def test_fp32_conv2d_single_int_params(self):
@@ -282,7 +280,6 @@ def test_fp32_conv2d_depthwise(self):
         # - Groups must equal In Channels
         # - Out Channels must be a positive multiple of In Channels
         for transpose in (True, False):
-
             self._test(
                 Conv2d(groups=2, in_channels=2, out_channels=6, transpose=transpose)
             )
@@ -292,7 +289,6 @@ def test_qs8_conv2d_depthwise(self):
             self._test(
                 Conv2d(groups=2, in_channels=2, out_channels=6, transpose=transpose),
                 quant_config=get_symmetric_quantization_config(),
-                check_quantized=not transpose,  # XNNPackQuantizer does not quantize this pattern yet
             )
 
     def test_fp32_conv2d_bn(self):
@@ -384,7 +380,6 @@ def test_qs8_conv2d_bn(self):
                 Conv2dBatchNorm(transpose=transpose),
                 quant_config=get_symmetric_quantization_config(),
                 conv_count=2,
-                check_quantized=not transpose,  # XNNPackQuantizer does not quantize this pattern yet
             )
 
     def test_qs8_conv2d_relu(self):
@@ -415,7 +410,7 @@ def get_inputs(self):
             self._test(
                 ConvReLU(transpose=transpose),
                 quant_config=get_symmetric_quantization_config(is_per_channel=True),
-                delegated=not transpose,
+                delegated=not transpose,  # XNNPACK does not support per input channel quantization for transpose convolutions with groups > 1
             )
 
     def test_qs8_conv2d_dw_relu(self):
@@ -467,9 +462,8 @@ def get_inputs(self):
                 quant_config=get_symmetric_quantization_config(
                     is_per_channel=per_channel_quant
                 ),
-                # xnnpack only supports per output channel quantization for transposed convolutions
-                # XNNPackQuantizer quantizes per input channel currently
-                delegated=not transpose or not per_channel_quant,
+                # XNNPACK does not support per input channel quantization for transpose convolutions with groups > 1
+                delegated=not (transpose and per_channel_quant),
             )
 
     def test_qs8_conv2d_relu_seq(self):
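Why the per-channel cases above are no longer expected to delegate: PyTorch stores ConvTranspose2d weights with the input channel on dim 0, so per-channel quantization along the usual channel axis yields one scale per input channel, which is the layout the new comments say XNNPACK cannot consume. The following is a minimal sketch of that layout difference using only stock PyTorch; the variable names are illustrative and none of this code belongs to the test file or the XNNPACK delegate.

import torch
import torch.nn as nn

# Weight layouts differ between the two flavours of convolution:
#   nn.Conv2d:          (out_channels, in_channels // groups, kH, kW)
#   nn.ConvTranspose2d: (in_channels,  out_channels // groups, kH, kW)
conv = nn.Conv2d(2, 6, 3)
deconv = nn.ConvTranspose2d(2, 6, 3)
print(conv.weight.shape)    # torch.Size([6, 2, 3, 3])
print(deconv.weight.shape)  # torch.Size([2, 6, 3, 3])

# Quantizing per channel along dim 0 (the usual channel axis for Conv2d weights)
# therefore produces one scale per *input* channel for ConvTranspose2d, so the
# tests expect such graphs not to be delegated (delegated=False when transpose
# and per-channel quantization are combined).
w = deconv.weight.detach()
scales = w.abs().amax(dim=(1, 2, 3)) / 127.0
zero_points = torch.zeros_like(scales, dtype=torch.int64)
qw = torch.quantize_per_channel(w, scales, zero_points, axis=0, dtype=torch.qint8)
print(qw.q_per_channel_scales().shape)  # torch.Size([2]) -- per input channel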