6 | 6 |
7 | 7 | from typing import Tuple |
8 | 8 |
9 | | -import pytest |
10 | | - |
11 | 9 | import torch |
12 | | -from executorch.backends.arm.test import common |
13 | 10 |
14 | 11 | from executorch.backends.arm.test.tester.test_pipeline import ( |
15 | | - TosaPipelineBI, |
| 12 | + OpNotSupportedPipeline, |
16 | 13 | TosaPipelineMI, |
17 | 14 | ) |
18 | 15 |
19 | 16 |
20 | | -input_t1 = Tuple[torch.Tensor] # Input x |
| 17 | +input_t1 = Tuple[torch.Tensor, torch.Tensor] # Inputs x, y |
| 18 | + |
| 19 | + |
| 20 | +class ChannelsLastInput(torch.nn.Module): |
| 21 | + """ |
| 22 | + Test rejection of a partition which has a channels last input. |
| 23 | + """ |
| 24 | + |
| 25 | + inputs: input_t1 = ( |
| 26 | + torch.randn(1, 2, 2, 2).to(memory_format=torch.channels_last), |
| 27 | + torch.randn(1, 2, 2, 2), |
| 28 | + ) |
| 29 | + |
| 30 | + def forward(self, x, y): |
| 31 | + x = x * y |
| 32 | + x = x.to(dtype=torch.int32, memory_format=torch.channels_last) |
| 33 | + x = x / 2 |
| 34 | + return x, y |
| 35 | + |
| 36 | + |
| 37 | +class ChannelsLastOutput(torch.nn.Module): |
| 38 | + """ |
| 39 | +    Test rejection of a partition which has a channels last output. |
| 40 | + """ |
| 41 | + |
| 42 | + inputs: input_t1 = ( |
| 43 | + torch.randn( |
| 44 | + 1, |
| 45 | + 2, |
| 46 | + 2, |
| 47 | + 2, |
| 48 | + ), |
| 49 | + torch.randn(1, 2, 2, 2), |
| 50 | + ) |
21 | 51 |
| 52 | + def forward(self, x, y): |
| 53 | + x = x * y |
| 54 | + x = x.clone(memory_format=torch.channels_last) |
| 55 | + x = x / 2 |
| 56 | + return x, y |
22 | 57 |
23 | | -class Conv2D(torch.nn.Module): |
24 | | - inputs: dict[str, input_t1] = { |
25 | | - "randn": (torch.randn(1, 2, 20, 20),), |
26 | | - } |
| 58 | + |
| 59 | +class ChannelsLastInsidePartition(torch.nn.Module): |
| 60 | + """ |
| 61 | +    Test non-rejection of a fully partitioned module which changes memory format inside the partition. |
| 62 | +    The TOSA backend ignores this internal memory format change, and since the input and output |
| 63 | +    have the expected contiguous memory format, the partition should be accepted. |
| 64 | + """ |
| 65 | + |
| 66 | + inputs: input_t1 = ( |
| 67 | + torch.randn( |
| 68 | + 1, |
| 69 | + 2, |
| 70 | + 2, |
| 71 | + 2, |
| 72 | + ), |
| 73 | + torch.randn(1, 2, 2, 2), |
| 74 | + ) |
27 | 75 |
28 | 76 | def __init__(self): |
29 | 77 | super().__init__() |
30 | | - self.conv2d = torch.nn.Conv2d(in_channels=2, out_channels=3, kernel_size=(3, 3)) |
| 78 | + self.conv = torch.nn.Conv2d(2, 2, kernel_size=1, bias=False) |
31 | 79 |
32 | | - def forward(self, x): |
33 | | - return self.conv2d(x.to(memory_format=torch.channels_last)) |
| 80 | + def forward(self, x, y): |
| 81 | + x = x * y |
| 82 | + x = x.to(memory_format=torch.channels_last) |
| 83 | + x = self.conv(x) |
| 84 | + x = x.clone(memory_format=torch.contiguous_format) |
| 85 | + return x, y |
34 | 86 |
35 | 87 |
36 | | -@common.parametrize("test_data", Conv2D.inputs) |
37 | | -def test_tosa_MI_pipeline(test_data: input_t1): |
38 | | - module = Conv2D() |
| 88 | +def test_dim_order_ok(): |
39 | 89 | pipeline = TosaPipelineMI[input_t1]( |
40 | | - module, |
41 | | - test_data, |
42 | | - [], |
43 | | - [], |
44 | | - use_to_edge_transform_and_lower=False, |
| 90 | + ChannelsLastInsidePartition(), ChannelsLastInsidePartition.inputs, [] |
45 | 91 | ) |
46 | | - pos = pipeline.find_pos("partition") |
47 | | - pipeline._stages = pipeline._stages[:pos] |
48 | 92 | pipeline.run() |
49 | | - with pytest.raises(RuntimeError): |
50 | | - pipeline.tester.partition() |
51 | | - |
52 | | - |
53 | | -@common.parametrize("test_data", Conv2D.inputs) |
54 | | -def test_tosa_BI_pipeline(test_data: input_t1): |
55 | | - module = Conv2D() |
56 | | - pipeline = TosaPipelineBI[input_t1]( |
57 | | - module, |
58 | | - test_data, |
59 | | - [], |
60 | | - [], |
61 | | - use_to_edge_transform_and_lower=False, |
| 93 | + |
| 94 | + |
| 95 | +def test_channels_last_input(): |
| 96 | + pipeline = OpNotSupportedPipeline[input_t1]( |
| 97 | + ChannelsLastInput(), |
| 98 | + ChannelsLastInput.inputs, |
| 99 | + non_delegated_ops={}, |
| 100 | + n_expected_delegates=0, |
| 101 | + ) |
| 102 | + pipeline.run() |
| 103 | + |
| 104 | + |
| 105 | +def test_channels_last_output(): |
| 106 | + pipeline = OpNotSupportedPipeline[input_t1]( |
| 107 | + ChannelsLastOutput(), |
| 108 | + ChannelsLastOutput.inputs, |
| 109 | + non_delegated_ops={}, |
| 110 | + n_expected_delegates=0, |
62 | 111 | ) |
63 | | - pos = pipeline.find_pos("partition") |
64 | | - pipeline._stages = pipeline._stages[:pos] |
65 | 112 | pipeline.run() |
66 | | - with pytest.raises(RuntimeError): |
67 | | - pipeline.tester.partition() |
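For context, a minimal standalone sketch (not part of this diff, and assuming a recent PyTorch that provides Tensor.dim_order()) of the memory formats the tests above exercise: converting a tensor to channels_last changes its dim order, which is the boundary property that the ChannelsLastInput/ChannelsLastOutput tests reject and that test_dim_order_ok accepts when the change stays inside the partition.

    import torch

    # Contiguous NCHW tensor, like the plain randn inputs above: dim order (0, 1, 2, 3).
    x = torch.randn(1, 2, 2, 2)
    print(x.dim_order())  # (0, 1, 2, 3)

    # channels_last (NHWC) tensor, like the rejected input/output cases: dim order (0, 2, 3, 1).
    y = x.to(memory_format=torch.channels_last)
    print(y.dim_order())  # (0, 2, 3, 1)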