6 | 6 |
7 | 7 | # pyre-unsafe |
8 | 8 |
9 | | - |
10 | 9 | import unittest |
11 | 10 |
12 | 11 | import torch |
13 | | -import torch.nn as nn |
14 | | -import torch.nn.functional as F |
15 | 12 | from executorch.devtools.inspector._intermediate_output_capturer import ( |
16 | 13 | IntermediateOutputCapturer, |
17 | 14 | ) |
18 | | - |
| 15 | +from executorch.devtools.inspector.tests.inspector_test_utils import ( |
| 16 | + check_if_final_outputs_match, |
| 17 | + model_registry, |
| 18 | +) |
19 | 19 | from executorch.exir import EdgeCompileConfig, EdgeProgramManager, to_edge |
20 | 20 | from torch.export import export, ExportedProgram |
21 | 21 | from torch.fx import GraphModule |
22 | 22 |
23 | 23 |
24 | 24 | class TestIntermediateOutputCapturer(unittest.TestCase): |
25 | | - @classmethod |
26 | | - def setUpClass(cls): |
27 | | - class TestModule(nn.Module): |
28 | | - def __init__(self): |
29 | | - super(TestModule, self).__init__() |
30 | | - self.conv = nn.Conv2d( |
31 | | - in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1 |
32 | | - ) |
33 | | - self.conv.weight = nn.Parameter( |
34 | | - torch.tensor( |
35 | | - [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]]] |
36 | | - ) |
37 | | - ) |
38 | | - self.conv.bias = nn.Parameter(torch.tensor([0.0])) |
39 | | - |
40 | | - self.linear = nn.Linear(in_features=4, out_features=2) |
41 | | - self.linear.weight = nn.Parameter( |
42 | | - torch.tensor([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]) |
43 | | - ) |
44 | | - self.linear.bias = nn.Parameter(torch.tensor([0.0, 0.0])) |
45 | | - self.bias = nn.Parameter(torch.tensor([0.5, -0.5]), requires_grad=False) |
46 | | - self.scale = nn.Parameter(torch.tensor([2.0, 0.5]), requires_grad=False) |
47 | | - |
48 | | - def forward(self, x): |
49 | | - x = self.conv(x) |
50 | | - x = x.view(x.size(0), -1) |
51 | | - x = self.linear(x) |
52 | | - x = x + self.bias |
53 | | - x = x - 0.1 |
54 | | - x = x * self.scale |
55 | | - x = x / (self.scale + 1.0) |
56 | | - x = F.relu(x) |
57 | | - x = torch.sigmoid(x) |
58 | | - x1, x2 = torch.split(x, 1, dim=1) |
59 | | - return x1, x2 |
60 | | - |
61 | | - cls.model = TestModule() |
62 | | - cls.input = torch.tensor([[[[1.0, 2.0], [3.0, 4.0]]]], requires_grad=True) |
63 | | - cls.aten_model: ExportedProgram = export(cls.model, (cls.input,), strict=True) |
64 | | - cls.edge_program_manager: EdgeProgramManager = to_edge( |
65 | | - cls.aten_model, compile_config=EdgeCompileConfig(_check_ir_validity=True) |
| 25 | + def _set_up_model(self, model_name): |
| 26 | + model = model_registry[model_name]() |
| 27 | + input_tensor = model.get_input() |
| 28 | + aten_model: ExportedProgram = export(model, (input_tensor,), strict=True) |
| 29 | + edge_program_manager: EdgeProgramManager = to_edge( |
| 30 | + aten_model, compile_config=EdgeCompileConfig(_check_ir_validity=True) |
66 | 31 | ) |
67 | | - cls.graph_module: GraphModule = cls.edge_program_manager._edge_programs[ |
| 32 | + graph_module: GraphModule = edge_program_manager._edge_programs[ |
68 | 33 | "forward" |
69 | 34 | ].module() |
70 | | - cls.capturer = IntermediateOutputCapturer(cls.graph_module) |
71 | | - cls.intermediate_outputs = cls.capturer.run_and_capture(cls.input) |
72 | | - |
73 | | - def test_keying_with_debug_handle_tuple(self): |
74 | | - for key in self.intermediate_outputs.keys(): |
75 | | - self.assertIsInstance(key, tuple) |
76 | | - |
77 | | - def test_tensor_cloning_and_detaching(self): |
78 | | - for output in self.intermediate_outputs.values(): |
79 | | - if isinstance(output, torch.Tensor): |
80 | | - self.assertFalse(output.requires_grad) |
81 | | - self.assertTrue(output.is_leaf) |
82 | | - |
83 | | - def test_placeholder_nodes_are_skipped(self): |
84 | | - for node in self.graph_module.graph.nodes: |
85 | | - if node.op == "placeholder": |
86 | | - self.assertNotIn( |
87 | | - node.meta.get("debug_handle"), self.intermediate_outputs |
| 35 | + capturer = IntermediateOutputCapturer(graph_module) |
| 36 | + intermediate_outputs = capturer.run_and_capture(input_tensor) |
| 37 | + return input_tensor, graph_module, capturer, intermediate_outputs |
| 38 | + |
| 39 | + def test_models(self): |
| 40 | + available_models = list(model_registry.keys()) |
| 41 | + for model_name in available_models: |
| 42 | + with self.subTest(model=model_name): |
| 43 | + input_tensor, graph_module, capturer, intermediate_outputs = ( |
| 44 | + self._set_up_model(model_name) |
88 | 45 | ) |
89 | 46 |
90 | | - def test_multiple_outputs_capture(self): |
91 | | - outputs = self.capturer.run_and_capture(self.input) |
92 | | - for output in outputs.values(): |
93 | | - if isinstance(output, tuple): |
94 | | - self.assertEqual(len(output), 2) |
95 | | - for part in output: |
96 | | - self.assertIsInstance(part, torch.Tensor) |
97 | | - |
98 | | - def test_capture_correct_outputs(self): |
99 | | - expected_outputs_with_handles = { |
100 | | - (10,): torch.tensor([[[[7.7000, 6.7000], [4.7000, 3.7000]]]]), |
101 | | - (11,): torch.tensor([[7.7000, 6.7000, 4.7000, 3.7000]]), |
102 | | - (12,): torch.tensor( |
103 | | - [[0.1000, 0.5000], [0.2000, 0.6000], [0.3000, 0.7000], [0.4000, 0.8000]] |
104 | | - ), |
105 | | - (13,): torch.tensor([[5.0000, 14.1200]]), |
106 | | - (14,): torch.tensor([[5.5000, 13.6200]]), |
107 | | - (15,): torch.tensor([[5.4000, 13.5200]]), |
108 | | - (16,): torch.tensor([[10.8000, 6.7600]]), |
109 | | - (17,): torch.tensor([3.0000, 1.5000]), |
110 | | - (18,): torch.tensor([[3.6000, 4.5067]]), |
111 | | - (19,): torch.tensor([[3.6000, 4.5067]]), |
112 | | - (20,): torch.tensor([[0.9734, 0.9891]]), |
113 | | - (21,): [torch.tensor([[0.9734]]), torch.tensor([[0.9891]])], |
114 | | - } |
115 | | - self.assertEqual( |
116 | | - len(self.intermediate_outputs), len(expected_outputs_with_handles) |
117 | | - ) |
118 | | - |
119 | | - for debug_handle, expected_output in expected_outputs_with_handles.items(): |
120 | | - actual_output = self.intermediate_outputs.get(debug_handle) |
121 | | - self.assertIsNotNone(actual_output) |
122 | | - if isinstance(expected_output, list): |
123 | | - self.assertIsInstance(actual_output, list) |
124 | | - self.assertEqual(len(actual_output), len(expected_output)) |
125 | | - for actual, expected in zip(actual_output, expected_output): |
126 | | - self.assertTrue( |
127 | | - torch.allclose(actual, expected, rtol=1e-4, atol=1e-5) |
128 | | - ) |
129 | | - else: |
| 47 | + # Test keying with debug handle tuple |
| 48 | + for key in intermediate_outputs.keys(): |
| 49 | + self.assertIsInstance(key, tuple) |
| 50 | + |
| 51 | + # Test tensor cloning and detaching |
| 52 | + for output in intermediate_outputs.values(): |
| 53 | + if isinstance(output, torch.Tensor): |
| 54 | + self.assertFalse(output.requires_grad) |
| 55 | + self.assertTrue(output.is_leaf) |
| 56 | + |
| 57 | + # Test placeholder nodes are skipped |
| 58 | + for node in graph_module.graph.nodes: |
| 59 | + if node.op == "placeholder": |
| 60 | + self.assertNotIn(node.meta.get("debug_handle"), intermediate_outputs)
| 61 | + |
| 62 | + # Test multiple outputs capture |
| 63 | + outputs = capturer.run_and_capture(input_tensor) |
| 64 | + for output in outputs.values(): |
| 65 | + if isinstance(output, tuple): |
| 66 | + self.assertEqual(len(output), 2) |
| 67 | + for part in output: |
| 68 | + self.assertIsInstance(part, torch.Tensor) |
| 69 | + |
| 70 | + # Test capture correct outputs |
130 | 71 | self.assertTrue( |
131 | | - torch.allclose(actual_output, expected_output, rtol=1e-4, atol=1e-5) |
| 72 | + check_if_final_outputs_match(model_name, intermediate_outputs) |
132 | 73 | ) |
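
For context, the refactored test assumes a shared test-utilities module. The sketch below is a minimal, hypothetical version of what `inspector_test_utils.model_registry` and `check_if_final_outputs_match` could look like, inferred solely from how this test calls them (models are constructed by name, expose `get_input()`, and expected outputs are compared per debug handle). It is not the actual ExecuTorch implementation; the class name, shapes, and expected-output table are illustrative placeholders.

```python
# Hypothetical sketch of executorch/devtools/inspector/tests/inspector_test_utils.py,
# inferred from this test's usage; names and values are illustrative only.
import torch
import torch.nn as nn


class ConvLinearModel(nn.Module):
    """Tiny model used only to exercise the intermediate-output capturer."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(1, 1, kernel_size=3, padding=1)
        self.linear = nn.Linear(4, 2)

    def forward(self, x):
        x = self.conv(x)
        x = self.linear(x.view(x.size(0), -1))
        return torch.split(torch.sigmoid(x), 1, dim=1)

    @staticmethod
    def get_input():
        # Input used by the test when exporting and running the model.
        return torch.tensor([[[[1.0, 2.0], [3.0, 4.0]]]], requires_grad=True)


# Maps a model name to its constructor; test_models iterates over every entry.
model_registry = {"ConvLinearModel": ConvLinearModel}


def check_if_final_outputs_match(model_name, intermediate_outputs):
    """Compare the captured outputs against hard-coded expected tensors for the
    given model, keyed by debug handle (expected values elided in this sketch)."""
    expected_per_model = {"ConvLinearModel": {}}  # {debug_handle: tensor} would live here
    expected = expected_per_model.get(model_name, {})
    return all(
        torch.allclose(intermediate_outputs[handle], tensor, rtol=1e-4, atol=1e-5)
        for handle, tensor in expected.items()
    )
```

Registering models by name in a shared module lets `test_models` add coverage for new architectures without duplicating the export/lowering boilerplate or the hard-coded expected outputs inside each test case.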