|
8 | 8 | import torch |
9 | 9 | import torchvision.models as models |
10 | 10 | from executorch.backends.apple.coreml.partition import CoreMLPartitioner |
11 | | -from executorch.backends.apple.mps.partition import MPSPartitioner |
12 | | -from executorch.exir.backend.backend_details import CompileSpec |
| 11 | +from executorch.exir import to_edge |
13 | 12 | from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner |
14 | 13 | from executorch.exir import to_edge_transform_and_lower |
15 | | -import ssl |
16 | | -import certifi |
17 | 14 |
|
18 | 15 |
|
def main() -> None:
    """Export MobileNetV3-Small as ExecuTorch `.pte` programs for several backends.

    Produces four files in the current directory: a portable (no-delegate)
    program, a Core ML-delegated program, an MPS placeholder (currently a
    portable fallback — see the note below), and an XNNPACK-delegated program.
    """
    model = models.mobilenet_v3_small(weights="DEFAULT").eval()
    sample_inputs = (torch.randn(1, 3, 224, 224),)

    # Portable program: no backend delegate, pure ExecuTorch ops.
    portable_program = to_edge(
        torch.export.export(model, sample_inputs),
    ).to_executorch()

    # Core ML-delegated program.
    coreml_program = to_edge_transform_and_lower(
        torch.export.export(model, sample_inputs),
        partitioner=[CoreMLPartitioner()],
    ).to_executorch()

    # NOTE: The MPS backend does not work with a pip install today, so this
    # currently falls back to portable ops instead.
    #
    # To actually run on the MPS backend, install ExecuTorch from source:
    #
    # https://pytorch.org/executorch/main/using-executorch-building-from-source.html#environment-setup
    # ./install_executorch.sh --pybind mps coreml xnnpack
    # ./backends/apple/mps/install_requirements.sh
    #
    # mps_program = to_edge_transform_and_lower(
    #     torch.export.export(model, sample_inputs),
    #     partitioner=[MPSPartitioner([CompileSpec("use_fp16", bytes([True]))])],
    # ).to_executorch()
    mps_program = to_edge(
        torch.export.export(model, sample_inputs),
    ).to_executorch()

    # XNNPACK-delegated program.
    xnnpack_program = to_edge_transform_and_lower(
        torch.export.export(model, sample_inputs),
        partitioner=[XnnpackPartitioner()],
    ).to_executorch()

    # Serialize every program; dict insertion order matches the export order.
    outputs = {
        "mv3.pte": portable_program,
        "mv3_coreml_all.pte": coreml_program,
        "mv3_mps_float16.pte": mps_program,
        "mv3_xnnpack_fp32.pte": xnnpack_program,
    }
    for filename, program in outputs.items():
        with open(filename, "wb") as file:
            program.write_to_file(file)
|
0 commit comments