Skip to content

Commit 4fbcde7

Browse files
authored
Merge branch 'main' into md-erf-reviewed
2 parents 41f9da8 + 3600d4f commit 4fbcde7

File tree

72 files changed

+1768
-1410
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

72 files changed

+1768
-1410
lines changed

.github/scripts/run_nm.py

Lines changed: 171 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,171 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
import re
8+
import subprocess
9+
import sys
10+
from dataclasses import dataclass
11+
from typing import Dict, List, Optional, Union
12+
13+
14+
@dataclass
class Symbol:
    """A single symbol entry parsed from `nm` output."""

    # Demangled symbol name.
    name: str
    # Symbol address, parsed from the hex address field.
    addr: int
    # Symbol size in bytes, parsed from the hex size field.
    size: int
    # Single-letter nm type code (e.g. "T" for .text, "b" for .bss).
    symbol_type: str


class Parser:
    """Runs `nm` on an ELF file, parses the symbols, and prints size tables."""

    # Matches one line of `nm --format=bsd --print-size` output:
    # "<addr> <size> <type> <name>". Compiled once at class level.
    _SYMBOL_PATTERN = re.compile(
        r"(?P<addr>[0-9a-fA-F]+)\s+(?P<size>[0-9a-fA-F]+)\s+(?P<type>\w)\s+(?P<name>.+)"
    )

    def __init__(self, elf: str, toolchain_prefix: str = "", filter=None):
        """
        Args:
            elf: Path to the ELF file to inspect.
            toolchain_prefix: Optional toolchain prefix for the nm binary
                (e.g. "arm-none-eabi-").
            filter: Name of a pre-defined symbol filter used by print():
                "all", "executorch", or "executorch_text".
        """
        self.elf = elf
        self.toolchain_prefix = toolchain_prefix
        self.symbols: Dict[str, Symbol] = self._get_nm_output()
        self.filter = filter

    @staticmethod
    def run_nm(
        elf_file_path: str, args: Optional[List[str]] = None, nm: str = "nm"
    ) -> str:
        """
        Run the nm command on the specified ELF file and return its stdout.

        Exits the process with status 1 if nm is missing or fails.
        """
        args = [] if args is None else args
        cmd = [nm] + args + [elf_file_path]
        try:
            result = subprocess.run(cmd, check=True, capture_output=True, text=True)
            return result.stdout
        except FileNotFoundError:
            print("Error: 'nm' command not found. Please ensure it's installed.")
            sys.exit(1)
        except subprocess.CalledProcessError as e:
            print(f"Error running nm on {elf_file_path}: {e}")
            print(f"stderr: {e.stderr}")
            sys.exit(1)

    @classmethod
    def _parse_nm_line(cls, line: str) -> Optional[Symbol]:
        """Parse one nm output line into a Symbol, or None if it doesn't match."""
        match = cls._SYMBOL_PATTERN.match(line)
        if match:
            addr = int(match.group("addr"), 16)
            size = int(match.group("size"), 16)
            type_ = match.group("type").strip().strip("\n")
            name = match.group("name").strip().strip("\n")
            return Symbol(name=name, addr=addr, size=size, symbol_type=type_)
        return None

    def _get_nm_output(self) -> Dict[str, Symbol]:
        """Run nm on self.elf and return a name -> Symbol mapping."""
        args = [
            "--print-size",
            "--size-sort",
            "--reverse-sort",
            "--demangle",
            "--format=bsd",
        ]
        output = Parser.run_nm(
            self.elf,
            args,
            nm=self.toolchain_prefix + "nm" if self.toolchain_prefix else "nm",
        )
        lines = output.splitlines()
        symbols = [s for s in map(self._parse_nm_line, lines) if s is not None]

        assert len(symbols) > 0, "No symbols found in nm output"
        if len(symbols) != len(lines):
            # BUG FIX: this message was missing its f-prefix, so the literal
            # text "{len(symbols)}" was printed instead of the counts.
            print(
                f"** Warning: Not all lines were parsed, check the output of nm. Parsed {len(symbols)} lines, given {len(lines)}"
            )
        if any(symbol.size == 0 for symbol in symbols):
            print("** Warning: Some symbols have zero size, check the output of nm.")

        # TODO: Populate the section and module fields from the linker map if available (-Wl,-Map=linker.map)
        return {symbol.name: symbol for symbol in symbols}

    def print(self):
        """Print the symbol table selected by self.filter, with a size summary."""
        print(f"Elf: {self.elf}")

        def print_table(filter=None, filter_name=None):
            # Print one table of (address, size, type, name), largest first,
            # restricted to symbols accepted by `filter` (None = keep all).
            print("\nAddress\t\tSize\tType\tName")
            # Apply filter and sort symbols
            symbols_to_print = {
                name: sym
                for name, sym in self.symbols.items()
                if not filter or filter(sym)
            }
            sorted_symbols = sorted(
                symbols_to_print.items(), key=lambda x: x[1].size, reverse=True
            )

            # Print symbols and calculate total size
            size_total = 0
            for name, sym in sorted_symbols:
                print(f"{hex(sym.addr)}\t\t{sym.size}\t{sym.symbol_type}\t{sym.name}")
                size_total += sym.size

            # Print summary
            symbol_percent = len(symbols_to_print) / len(self.symbols) * 100
            print("-----")
            print(f"> Total bytes: {size_total}")
            print(
                f"Counted: {len(symbols_to_print)}/{len(self.symbols)}, {symbol_percent:0.2f}% (filter: '{filter_name}')"
            )
            print("=====\n")

        # Print tables with different filters
        def is_executorch_symbol(s):
            return "executorch" in s.name or s.name.startswith("et")

        FILTER_NAME_TO_FILTER_AND_LABEL = {
            "all": (None, "All"),
            "executorch": (is_executorch_symbol, "ExecuTorch"),
            "executorch_text": (
                lambda s: is_executorch_symbol(s) and s.symbol_type.lower() == "t",
                "ExecuTorch .text",
            ),
        }

        # Unknown filter names silently fall back to "all".
        filter_func, label = FILTER_NAME_TO_FILTER_AND_LABEL.get(
            self.filter, FILTER_NAME_TO_FILTER_AND_LABEL["all"]
        )
        print_table(filter_func, label)
142+
143+
144+
if __name__ == "__main__":
145+
import argparse
146+
147+
parser = argparse.ArgumentParser(
148+
description="Process ELF file and linker map file."
149+
)
150+
parser.add_argument(
151+
"-e", "--elf-file-path", required=True, help="Path to the ELF file"
152+
)
153+
parser.add_argument(
154+
"-f",
155+
"--filter",
156+
required=False,
157+
default="all",
158+
help="Filter symbols by pre-defined filters",
159+
choices=["all", "executorch", "executorch_text"],
160+
)
161+
parser.add_argument(
162+
"-p",
163+
"--toolchain-prefix",
164+
required=False,
165+
default="",
166+
help="Optional toolchain prefix for nm",
167+
)
168+
169+
args = parser.parse_args()
170+
p = Parser(args.elf_file_path, args.toolchain_prefix, filter=args.filter)
171+
p.print()

.github/workflows/trunk.yml

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -231,6 +231,60 @@ jobs:
231231
# Run arm unit tests using the simulator
232232
backends/arm/test/test_arm_baremetal.sh test_pytest_ethosu_fvp
233233
234+
test-arm-cortex-m-size-test:
235+
name: test-arm-cortex-m-size-test
236+
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
237+
permissions:
238+
id-token: write
239+
contents: read
240+
with:
241+
runner: linux.2xlarge
242+
docker-image: executorch-ubuntu-22.04-arm-sdk
243+
submodules: 'true'
244+
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
245+
timeout: 90
246+
script: |
247+
# The generic Linux job chooses to use base env, not the one setup by the image
248+
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
249+
conda activate "${CONDA_ENV}"
250+
251+
source .ci/scripts/utils.sh
252+
install_executorch "--use-pt-pinned-commit"
253+
.ci/scripts/setup-arm-baremetal-tools.sh
254+
source examples/arm/ethos-u-scratch/setup_path.sh
255+
256+
# Use baremetal toolchain
257+
arm-none-eabi-c++ --version
258+
toolchain_cmake=examples/arm/ethos-u-setup/arm-none-eabi-gcc.cmake
259+
toolchain_cmake=$(realpath ${toolchain_cmake})
260+
261+
# Build and test size test
262+
bash test/build_size_test.sh "-DCMAKE_TOOLCHAIN_FILE=${toolchain_cmake} -DEXECUTORCH_BUILD_ARM_BAREMETAL=ON"
263+
elf="cmake-out/test/size_test"
264+
265+
# Dump basic info
266+
ls -al ${elf}
267+
arm-none-eabi-size ${elf}
268+
269+
# Dump symbols
270+
python .github/scripts/run_nm.py -e ${elf}
271+
python .github/scripts/run_nm.py -e ${elf} -f "executorch" -p "arm-none-eabi-"
272+
python .github/scripts/run_nm.py -e ${elf} -f "executorch_text" -p "arm-none-eabi-"
273+
274+
# Add basic guard - TODO: refine this!
275+
arm-none-eabi-strip ${elf}
276+
output=$(ls -la ${elf})
277+
arr=($output)
278+
size=${arr[4]}
279+
threshold="102400" # 100KiB
280+
echo "size: $size, threshold: $threshold"
281+
if [[ "$size" -le "$threshold" ]]; then
282+
echo "Success $size <= $threshold"
283+
else
284+
echo "Fail $size > $threshold"
285+
exit 1
286+
fi
287+
234288
test-coreml-delegate:
235289
name: test-coreml-delegate
236290
uses: pytorch/test-infra/.github/workflows/macos_job.yml@main

CMakeLists.txt

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -645,13 +645,18 @@ target_link_options_shared_lib(executorch)
645645
# Real integrations should supply their own YAML file that only lists the
646646
# operators necessary for the models that will run.
647647
#
648+
if(EXECUTORCH_BUILD_KERNELS_OPTIMIZED)
649+
# find pytorch lib here to make it available to all
650+
# sub-directories. Find it before including portable so that
651+
# optimized_portable_kernels can use it.
652+
find_package_torch_headers()
653+
endif()
654+
648655
if(BUILD_EXECUTORCH_PORTABLE_OPS)
649656
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/kernels/portable)
650657
endif()
651658

652659
if(EXECUTORCH_BUILD_KERNELS_OPTIMIZED)
653-
# find pytorch lib here to make it available to all sub-directories
654-
find_package_torch_headers()
655660
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/kernels/optimized)
656661
endif()
657662

backends/nxp/README.md

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
# NXP eIQ Neutron Backend
2+
3+
This subtree contains the ExecuTorch Backend implementation for the
4+
[eIQ® Neutron Neural Processing Unit (NPU)](https://www.nxp.com/applications/technologies/ai-and-machine-learning/eiq-neutron-npu:EIQ-NEUTRON-NPU).
5+
6+
The eIQ® Neutron NPU is a highly scalable accelerator core architecture providing machine learning (ML) acceleration,
7+
able to support common and critical tasks for edge AI such as anomaly detection, speech recognition,
8+
image classification, object detection, facial recognition, image segmentation, and generative AI use cases like
9+
large and small language models (LLMs & SLMs) and text-to-speech (TTS).
10+
The architecture provides power and performance optimized NPUs integrated with NXP's broad portfolio of
11+
microcontrollers and applications processors.
12+
13+
The eIQ Neutron NPUs offer support for a wide variety of neural network types such as CNN, RNN, TCN and Transformer
14+
networks, as well as the ability to adapt and scale to new model architectures, topologies and layer types introduced
15+
to AI workloads. ML application development with the eIQ Neutron NPU is fully supported by the
16+
[eIQ machine learning software development environment](https://www.nxp.com/design/design-center/software/eiq-ml-development-environment/eiq-toolkit-for-end-to-end-model-development-and-deployment:EIQ-TOOLKIT).
17+
The eIQ AI SW Stack provides a streamlined development experience for developers and end-users of NXP products.
18+
eIQ extensions connect broader AI ecosystems to the edge, such as the NVIDIA TAO extension, which enables developers to bring AI models trained and fine-tuned with TAO to NXP-powered edge devices.
19+
20+
21+
## Supported NXP platforms
22+
At this moment the following eIQ® Neutron NPU variants and NXP platforms are supported by the NXP eIQ Neutron Backend:
23+
24+
* **eIQ Neutron N3-64**, available on [i.MX RT700](https://www.nxp.com/products/i.MX-RT700)
25+
26+
In the future the NXP eIQ Neutron Backend will be extended to support [i.MX 9 Application Processors](https://www.nxp.com/products/processors-and-microcontrollers/arm-processors/i-mx-applications-processors/i-mx-9-processors:IMX9-PROCESSORS)
27+
with eIQ Neutron NPU, like the [i.MX 95](https://www.nxp.com/products/iMX95).
28+
29+
30+
## Layout
31+
TBD
32+
33+
## Backend Status and Maturity
34+
**Current Status:** Prototype Quality
35+
36+
The eIQ Neutron NPU Backend should be considered as prototype quality at this moment. Subject to significant changes and
37+
improvements. NXP and the ExecuTorch community are actively developing this codebase.
38+
39+
## Help & Improvements
40+
If you have problems or questions or have suggestions for ways to make
41+
implementation and testing better, please reach out to the NXP representative for the SoC you are interested in using,
42+
or your distribution partner contact.
43+
44+
Or raise the issue here on ExecuTorch GitHub, label it with `module: nxp` and our ML team will address it on a priority-basis.

backends/qualcomm/_passes/__init__.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -4,51 +4,51 @@
44
# This source code is licensed under the BSD-style license found in the
55
# LICENSE file in the root directory of this source tree.
66

7-
from .annotate_decomposed import AnnotateDecomposed
87
from .annotate_quant_attrs import AnnotateQuantAttrs
9-
from .constant_i64_to_i32 import ConstantI64toI32
8+
from .annotate_stack import AnnotateStack
9+
from .annotate_unbind import AnnotateUnbind
1010
from .convert_bmm_to_matmul import ConvertBmmToMatmul
1111
from .convert_conv1d_to_conv2d import ConvertConv1dToConv2d
12-
from .convert_to_linear import ConvertToLinear
1312
from .decompose_any import DecomposeAny
1413
from .decompose_einsum import DecomposeEinsum
1514
from .decompose_expm1 import DecomposeExpM1
1615
from .decompose_linalg_vector_norm import DecomposeLinalgVectorNorm
1716
from .decompose_silu import DecomposeSilu
1817
from .expand_broadcast_tensor_shape import ExpandBroadcastTensorShape
18+
from .fixed_linear_keep_dim import FixedLinearKeepDim
1919
from .fold_qdq import FoldQDQ
2020
from .fuse_consecutive_transpose import FuseConsecutiveTranspose
21+
from .i64_to_i32 import I64toI32
2122
from .insert_io_qdq import InsertIOQDQ
2223
from .insert_requantize import InsertRequantize
2324
from .layout_transform import LayoutTransform
2425
from .lift_constant_scalar_operands import LiftConstantScalarOperands
2526
from .recompose_pixel_unshuffle import RecomposePixelUnshuffle
26-
from .recompose_prelu import RecomposePReLU
2727
from .recompose_rms_norm import RecomposeRmsNorm
2828
from .reduce_dynamic_range import ReduceDynamicRange
2929
from .remove_redundancy import RemoveRedundancy
3030
from .replace_arange_args import ReplaceArangeArgs
3131
from .replace_index_put_input import ReplaceIndexPutInput
3232
from .replace_inf_values import ReplaceInfValues
33-
from .tensor_i64_to_i32 import TensorI64toI32
33+
from .tag_quant_io import TagQuantIO
3434

3535

3636
__all__ = [
37-
AnnotateDecomposed,
3837
AnnotateQuantAttrs,
39-
ConstantI64toI32,
38+
AnnotateStack,
39+
AnnotateUnbind,
4040
ConvertBmmToMatmul,
4141
ConvertConv1dToConv2d,
42-
RecomposePReLU,
43-
ConvertToLinear,
4442
DecomposeAny,
4543
DecomposeEinsum,
4644
DecomposeExpM1,
4745
DecomposeLinalgVectorNorm,
4846
DecomposeSilu,
4947
ExpandBroadcastTensorShape,
48+
FixedLinearKeepDim,
5049
FoldQDQ,
5150
FuseConsecutiveTranspose,
51+
I64toI32,
5252
InsertIOQDQ,
5353
InsertRequantize,
5454
LayoutTransform,
@@ -60,5 +60,5 @@
6060
ReplaceArangeArgs,
6161
ReplaceIndexPutInput,
6262
ReplaceInfValues,
63-
TensorI64toI32,
63+
TagQuantIO,
6464
]

0 commit comments

Comments
 (0)