 # Please refer to the license found in the LICENSE file in the root directory of the source tree.

 import copy
+import sys
 import unittest

 import coremltools as ct
 from executorch.backends.apple.coreml.compiler import CoreMLBackend
 from executorch.backends.apple.coreml.partition import CoreMLPartitioner
 from executorch.exir.backend.utils import format_delegated_graph
+from executorch.runtime import Runtime


 @torch.library.custom_op("unsupported::linear", mutates_args=())
@@ -35,6 +37,10 @@ def _(
     return torch.ops.aten.linear.default(x, w, b)


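+# The Core ML runtime can only be exercised on macOS (sys.platform == "darwin").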
+_TEST_RUNTIME = sys.platform == "darwin"
+_TEST_RUNTIME = False  # Disable until segfault fixed: https://github.com/pytorch/executorch/issues/12408
+
+
 class TestCoreMLPartitioner(unittest.TestCase):
     edge_compile_config = executorch.exir.EdgeCompileConfig()

@@ -236,8 +242,6 @@ def forward(self, a, x, b):

         delegated_program_manager = edge_program_manager.to_backend(CoreMLPartitioner())

-        print(delegated_program_manager.exported_program())
-
         for node in delegated_program_manager.exported_program().graph.nodes:
             if node.op == "call_function":
                 assert node.target.__name__ in [
@@ -249,51 +253,62 @@ def forward(self, a, x, b):
         with self.assertRaises(NotImplementedError):
             edge_program_manager2.to_backend(CoreMLPartitioner(lower_full_graph=True))

-    def test_symint_arg(self):
-        class Model(torch.nn.Module):
-            def forward(self, x, w, b, y):
-                val = y.item()
-                out = torch.ops.unsupported.linear.default(x, w, b + val) + val
-                out2 = torch.ops.aten.linear.default(out, w, b) + val
-                return out2
-
-        model = Model()
-        model.eval()
-        example_inputs = (
-            torch.randn(2, 2),
-            torch.randn(2, 2),
-            torch.randn(2, 2),
-            torch.tensor(2),
-        )
-        exir_program_aten = torch.export.export(model, example_inputs)
-
-        edge_program_manager = executorch.exir.to_edge(exir_program_aten)
-
-        delegated_program_manager = edge_program_manager.to_backend(CoreMLPartitioner())
-
-        # This op has symbolic args
-        assert (
-            "torch.ops.aten.scalar_tensor.default"
-            in delegated_program_manager.exported_program().graph_module.code
-        )
-
-    def test_tag_constant_data_false(self):
+    # def test_symint_arg(self):
+    #     class Model(torch.nn.Module):
+    #         def forward(self, x, w, b, y):
+    #             val = y.item()
+    #             torch._check(val >= 0)
+    #             torch._check(val < 2)
+    #             out = torch.ops.aten.linear.default(x, w, b)
+    #             out2 = out.relu()[val]
+    #             return out2
+
+    #     model = Model()
+    #     model.eval()
+    #     example_inputs = (
+    #         torch.randn(2, 2),
+    #         torch.randn(2, 2),
+    #         torch.randn(2, 2),
+    #         torch.tensor(2),
+    #     )
+    #     exir_program_aten = torch.export.export(model, example_inputs)
+
+    #     edge_program_manager = executorch.exir.to_edge(exir_program_aten)
+
+    #     delegated_program_manager = edge_program_manager.to_backend(CoreMLPartitioner(skip_ops_for_coreml_delegation=["aten.scalar_tensor.default"]))
+
+    #     # This op has symbolic args
+    #     assert (
+    #         "torch.ops.aten._assert_scalar.default"
+    #         in delegated_program_manager.exported_program().graph_module.code
+    #     )
+
+    #     if _TEST_RUNTIME:
+    #         et_prog = delegated_program_manager.to_executorch()
+    #         runtime = Runtime.get()
+    #         program = runtime.load_program(et_prog.buffer)
+    #         method = program.load_method("forward")
+    #         et_outputs = method.execute(*example_inputs)[0]
+    #         eager_outputs = model(*example_inputs)
+    #         self.assertTrue(torch.allclose(et_outputs, eager_outputs, atol=1e-02, rtol=1e-02))
+
+    def test_take_over_constant_data_false(self):
         class Model(torch.nn.Module):
             def __init__(self):
                 super().__init__()
-                self.linear = torch.nn.Linear(2, 2)
+                self.linear = torch.nn.Linear(50, 100)

             def forward(self, x):
                 return self.linear(x)

         model = Model()
         model.eval()
-        example_inputs = (torch.randn(2, 2),)
+        example_inputs = (torch.randn(2, 50),)
         exir_program_aten = torch.export.export(model, example_inputs)

         edge_program_manager = executorch.exir.to_edge_transform_and_lower(
             exir_program_aten,
-            partitioner=[CoreMLPartitioner(tag_constant_data=False)],
+            partitioner=[CoreMLPartitioner(take_over_constant_data=False)],
         )
         for node in edge_program_manager.exported_program().graph.nodes:
             if (
@@ -305,6 +320,20 @@ def forward(self, x):
                 # lowered_module_0, x, p_linear_weight, p_linear_bias
                 assert len(node.args) == 4

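+        # Run the lowered program on the ExecuTorch runtime and compare its outputs with eager PyTorch.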
+        if _TEST_RUNTIME:
+            et_prog = edge_program_manager.to_executorch()
+            runtime = Runtime.get()
+            program = runtime.load_program(et_prog.buffer)
+            method = program.load_method("forward")
+            et_outputs = method.execute(*example_inputs)[0]
+            eager_outputs = model(*example_inputs)
+            self.assertTrue(
+                torch.allclose(et_outputs, eager_outputs, atol=1e-02, rtol=1e-02)
+            )
+
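+            # Also serialize the lowered program to a .pte file on disk.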
+            with open("/tmp/et_model.pte", "wb") as file:
+                et_prog.write_to_file(file)
+

 if __name__ == "__main__":
     test_runner = TestCoreMLPartitioner()
@@ -313,5 +342,5 @@ def forward(self, x):
     test_runner.test_ops_to_not_decompose()
     test_runner.test_buffer()
     test_runner.test_lower_full_graph()
-    test_runner.test_symint_arg()
-    test_runner.test_tag_constant_data_false()
+    # test_runner.test_symint_arg()
+    test_runner.test_take_over_constant_data_false()