Skip to content

Commit 8859b06

Browse files
committed
NXP backend: Add tests for quantizer pattern order invariance
1 parent 338c637 commit 8859b06

File tree

2 files changed

+80
-0
lines changed

2 files changed

+80
-0
lines changed

backends/nxp/tests/models.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -236,3 +236,16 @@ def __init__(self):
236236
def forward(self, x):
237237
x = self.conv(x)
238238
return torch.permute(x, [0, 2, 1, 3])
239+
240+
241+
class Conv2dReLUMaxPoolModule(torch.nn.Module):
    """Small Conv2d -> ReLU -> MaxPool2d chain.

    Used by the quantizer tests to exercise two consecutive operators that
    both rely on shared-spec quantization patterns (ReLU followed by MaxPool).
    """

    def __init__(self):
        super().__init__()
        # 3 input channels, 64 output channels, 2x2 kernel, no bias.
        self.conv = torch.nn.Conv2d(3, 64, 2, bias=False)
        self.relu = torch.nn.ReLU()
        # 2x2 pooling window with stride 2.
        self.pool = torch.nn.MaxPool2d(2, 2)

    def forward(self, x):
        # Convolution, activation and pooling applied as one chained expression.
        return self.pool(self.relu(self.conv(x)))

backends/nxp/tests/test_quantizer.py

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,8 @@
55

66
# Tests for NeutronQuantizer.
77

8+
from copy import deepcopy
9+
810
import executorch.backends.nxp.tests.models as models
911
import torch
1012
from executorch.backends.nxp.quantizer.neutron_quantizer import NeutronQuantizer
@@ -271,3 +273,68 @@ def test_quantizer_conv2d_permute():
271273
assert nodes[7].name == "dequantize_per_tensor_default_2"
272274
assert nodes[8].name == "permute"
273275
assert nodes[9].name == "quantize_per_tensor_default_3"
276+
277+
278+
def test_multiple_shared_spec_ops_in_row():
    """
    Verify that two consecutive operators whose quantizers both rely on
    SharedSpecPattern (ReLU followed by MaxPool2d) quantize without breaking
    the quantization process.
    """
    model = models.Conv2dReLUMaxPoolModule()
    model.eval()

    sample_input = (torch.ones(1, 3, 64, 64),)
    quantizer = NeutronQuantizer()
    exported_module = torch.export.export_for_training(
        model, sample_input, strict=True
    ).module()

    # noinspection PyTypeChecker
    prepared = prepare_pt2e(exported_module, quantizer)
    prepared(*sample_input)
    converted = convert_pt2e(prepared)

    # Dry run
    converted(*sample_input)

    graph_nodes = list(converted.graph.nodes)

    # The pooling operator must end up wrapped in its own DQ/Q pair.
    assert len(graph_nodes) == 15
    assert graph_nodes[-5].name == "dequantize_per_tensor_default_3"
    assert graph_nodes[-4].name == "max_pool2d"
    assert graph_nodes[-3].name == "quantize_per_tensor_default_4"
307+
308+
def test_quantizers_order_invariance():
    """
    This test demonstrates that the order of quantizers in NeutronQuantizer
    does not affect the resulting graph.

    Quantizes the same exported module twice — once with the quantizer's
    default pattern order and once with the order reversed — and asserts the
    two resulting graphs contain the same node sequence.
    """
    model = models.Conv2dReLUModule()
    model.eval()

    example_input = (torch.ones(1, 4, 64, 64),)
    quantizer = NeutronQuantizer()

    graph_module = torch.export.export_for_training(
        model, example_input, strict=True
    ).module()

    # Quantize a deep copy so `graph_module` stays pristine for the second run.
    m = prepare_pt2e(deepcopy(graph_module), quantizer)
    m(*example_input)
    m = convert_pt2e(m)

    quantizer.quantizers = quantizer.quantizers[::-1]
    m_reversed = prepare_pt2e(graph_module, quantizer)
    m_reversed(*example_input)
    # BUG FIX: the original called `convert_pt2e(m)` here, converting the
    # already-converted module and leaving `m_reversed` unconverted — the test
    # then compared `m` with itself and could never fail.
    m_reversed = convert_pt2e(m_reversed)

    # Dry run
    m(*example_input)
    m_reversed(*example_input)

    nodes = list(m.graph.nodes)
    # BUG FIX: the original read `m.graph.nodes` here a second time instead of
    # the reversed-order module's graph.
    nodes_reversed = list(m_reversed.graph.nodes)

    assert len(nodes) == len(nodes_reversed)
    # torch.fx.Node does not define value equality, so `n == n_reversed` would
    # compare object identity and always be False across two distinct graphs.
    # Compare the node names instead, which encode op and ordering.
    assert all(
        n.name == n_reversed.name for n, n_reversed in zip(nodes, nodes_reversed)
    )

0 commit comments

Comments
 (0)