Skip to content

Commit c14b2fe

Browse files
committed
Update base for Update on "Use c10 version of half/bfloat16 in executorch"
Accomplished by importing relevant files from c10 into executorch/runtime/core/portable_type/c10, and then using `using` in the top-level ExecuTorch headers. This approach should keep the ExecuTorch build hermetic for embedded use cases. In the future, we should add a CI job to ensure the c10 files stay identical to the PyTorch ones. Differential Revision: [D66106969](https://our.internmc.facebook.com/intern/diff/D66106969/) [ghstack-poisoned]
2 parents 6e6f388 + 443ba3b commit c14b2fe

File tree

87 files changed

+2066
-823
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

87 files changed

+2066
-823
lines changed

.github/workflows/android-perf.yml

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ jobs:
9898
- uses: actions/checkout@v3
9999

100100
- name: Prepare the spec
101+
id: prepare
101102
shell: bash
102103
env:
103104
BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
@@ -111,7 +112,7 @@ jobs:
111112
# so let's just sed it
112113
sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' android-llm-device-farm-test-spec.yml.j2
113114
114-
BENCHMARK_CONFIG_ID="${{ matrix.model }}_${{ matrix.config }}"
115+
BENCHMARK_CONFIG_ID=$(echo "${{ matrix.model }}_${{ matrix.config }}" | sed -e 's/[^A-Za-z0-9._-]/_/g')
115116
# The config for this benchmark runs, we save it in the test spec so that it can be fetched
116117
# later by the upload script
117118
sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' android-llm-device-farm-test-spec.yml.j2
@@ -122,6 +123,7 @@ jobs:
122123
123124
# Save the benchmark configs so that we can use it later in the dashboard
124125
echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
126+
echo "benchmark-config-id=${BENCHMARK_CONFIG_ID}" >> $GITHUB_OUTPUT
125127
126128
- name: Upload the spec
127129
uses: seemethere/upload-artifact-s3@v5
@@ -141,7 +143,7 @@ jobs:
141143
${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
142144
retention-days: 1
143145
if-no-files-found: error
144-
path: extension/benchmark/android/benchmark/${{ matrix.model }}_${{ matrix.config }}.json
146+
path: extension/benchmark/android/benchmark/${{ steps.prepare.outputs.benchmark-config-id }}.json
145147

146148
export-models:
147149
name: export-models

.github/workflows/apple-perf.yml

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,7 @@ jobs:
100100
- uses: actions/checkout@v3
101101

102102
- name: Prepare the spec
103+
id: prepare
103104
shell: bash
104105
env:
105106
BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
@@ -113,7 +114,7 @@ jobs:
113114
# so let's just sed it
114115
sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' default-ios-device-farm-appium-test-spec.yml.j2
115116
116-
BENCHMARK_CONFIG_ID="${{ matrix.model }}_${{ matrix.config }}"
117+
BENCHMARK_CONFIG_ID=$(echo "${{ matrix.model }}_${{ matrix.config }}" | sed -e 's/[^A-Za-z0-9._-]/_/g')
117118
# The config for this benchmark runs, we save it in the test spec so that it can be fetched
118119
# later by the upload script
119120
sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' default-ios-device-farm-appium-test-spec.yml.j2
@@ -124,6 +125,7 @@ jobs:
124125
125126
# Save the benchmark configs so that we can use it later in the dashboard
126127
echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
128+
echo "benchmark-config-id=${BENCHMARK_CONFIG_ID}" >> $GITHUB_OUTPUT
127129
128130
- name: Upload the spec
129131
uses: seemethere/upload-artifact-s3@v5
@@ -143,7 +145,7 @@ jobs:
143145
${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
144146
retention-days: 1
145147
if-no-files-found: error
146-
path: extension/benchmark/apple/Benchmark/${{ matrix.model }}_${{ matrix.config }}.json
148+
path: extension/benchmark/apple/Benchmark/${{ steps.prepare.outputs.benchmark-config-id }}.json
147149

148150
export-models:
149151
name: export-models

.github/workflows/lint.yml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,19 @@ jobs:
2020
with:
2121
runner: linux.2xlarge
2222
docker-image: executorch-ubuntu-22.04-linter
23+
submodules: 'true'
2324
fetch-depth: 0
2425
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
2526
timeout: 90
2627
script: |
2728
# The generic Linux job chooses to use base env, not the one setup by the image
2829
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
2930
conda activate "${CONDA_ENV}"
31+
32+
# For mypy linting, we need to install executorch first so that
33+
# it builds the python package information.
34+
BUILD_TOOL="cmake"
35+
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}"
3036
3137
CACHE_DIRECTORY="/tmp/.lintbin"
3238
# Try to recover the cached binaries

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
.hypothesis
22
buck-out/
3+
.mypy_cache/
34
buck2-bin/
45
cmake-out*
56
.DS_Store

.lintrunner.toml

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -285,3 +285,49 @@ command = [
285285
'--',
286286
'@{{PATHSFILE}}',
287287
]
288+
289+
[[linter]]
290+
code = 'MYPY'
291+
include_patterns = [
292+
# TODO(https://github.com/pytorch/executorch/issues/7441): Gradually start enabling all folders.
293+
# 'backends/**/*.py',
294+
'build/**/*.py',
295+
'codegen/**/*.py',
296+
# 'devtools/**/*.py',
297+
'docs/**/*.py',
298+
# 'examples/**/*.py',
299+
# 'exir/**/*.py',
300+
# 'extension/**/*.py',
301+
'kernels/**/*.py',
302+
# 'profiler/**/*.py',
303+
'runtime/**/*.py',
304+
'scripts/**/*.py',
305+
# 'test/**/*.py',
306+
# 'util/**/*.py',
307+
'*.py',
308+
]
309+
exclude_patterns = [
310+
'third-party/**',
311+
'**/third-party/**',
312+
'scripts/check_binary_dependencies.py',
313+
]
314+
command = [
315+
'python',
316+
'-m',
317+
'lintrunner_adapters',
318+
'run',
319+
'mypy_linter',
320+
'--config=.mypy.ini',
321+
'--show-disable',
322+
'--',
323+
'@{{PATHSFILE}}'
324+
]
325+
init_command = [
326+
'python',
327+
'-m',
328+
'lintrunner_adapters',
329+
'run',
330+
'pip_init',
331+
'--dry-run={{DRYRUN}}',
332+
'--requirement=requirements-lintrunner.txt',
333+
]

.mypy.ini

Lines changed: 80 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,80 @@
1+
[mypy]
2+
allow_redefinition = True
3+
warn_unused_configs = True
4+
warn_redundant_casts = True
5+
show_error_codes = True
6+
show_column_numbers = True
7+
disallow_untyped_decorators = True
8+
follow_imports = normal
9+
local_partial_types = True
10+
enable_error_code = possibly-undefined
11+
warn_unused_ignores = False
12+
13+
files =
14+
backends,
15+
codegen,
16+
devtools,
17+
examples,
18+
exir,
19+
extension,
20+
kernels,
21+
profiler,
22+
runtime,
23+
scripts,
24+
util
25+
26+
mypy_path = executorch
27+
28+
[mypy-executorch.codegen.*]
29+
follow_untyped_imports = True
30+
31+
[mypy-executorch.devtools.*]
32+
follow_untyped_imports = True
33+
34+
[mypy-executorch.exir.*]
35+
follow_untyped_imports = True
36+
37+
[mypy-executorch.extension.*]
38+
follow_untyped_imports = True
39+
40+
[mypy-executorch.kernels.*]
41+
follow_untyped_imports = True
42+
43+
[mypy-executorch.runtime.*]
44+
follow_untyped_imports = True
45+
46+
[mypy-requests.*]
47+
follow_untyped_imports = True
48+
49+
[mypy-torchgen.*]
50+
follow_untyped_imports = True
51+
52+
[mypy-buck_util]
53+
ignore_missing_imports = True
54+
55+
[mypy-docutils.*]
56+
ignore_missing_imports = True
57+
58+
[mypy-pandas]
59+
ignore_missing_imports = True
60+
61+
[mypy-pytorch_sphinx_theme]
62+
ignore_missing_imports = True
63+
64+
[mypy-ruamel]
65+
ignore_missing_imports = True
66+
67+
[mypy-setuptools.*]
68+
ignore_missing_imports = True
69+
70+
[mypy-sphinx.*]
71+
ignore_missing_imports = True
72+
73+
[mypy-tomllib]
74+
ignore_missing_imports = True
75+
76+
[mypy-yaml]
77+
ignore_missing_imports = True
78+
79+
[mypy-zstd]
80+
ignore_missing_imports = True

backends/cadence/aot/compiler.py

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -18,17 +18,11 @@
1818
)
1919
from executorch.backends.cadence.aot.quantizer.fusion_pass import QuantFusion
2020
from executorch.backends.cadence.aot.quantizer.quantizer import CadenceQuantizer
21-
22-
from executorch.backends.cadence.aot.replace_ops import ReplaceSafeSoftmaxWithSoftmax
2321
from executorch.backends.cadence.aot.utils import (
2422
get_default_memory_config,
2523
MemoryConfig,
26-
model_gm_has_SDPA,
2724
model_is_quantized,
2825
)
29-
from executorch.backends.transforms.decompose_sdpa import (
30-
DecomposeScaledDotProductAttention,
31-
)
3226
from executorch.devtools import generate_etrecord
3327
from executorch.exir import (
3428
EdgeCompileConfig,
@@ -91,16 +85,6 @@ def convert_pt2(
9185
.module()
9286
)
9387

94-
if model_gm_has_SDPA(model_gm):
95-
# Decompose SDPA
96-
DecomposeScaledDotProductAttention(False)(model_gm)
97-
98-
# Swap _safe_softmax with _softmax (see https://github.com/pytorch/pytorch/pull/133882
99-
# for details).
100-
result = ReplaceSafeSoftmaxWithSoftmax()(model_gm)
101-
assert result is not None
102-
model_gm = result.graph_module
103-
10488
# Prepare
10589
prepared_model = prepare_pt2e(model_gm, quantizer)
10690

backends/cadence/aot/compiler_utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -129,16 +129,16 @@ def get_transposed_dims(node: torch.fx.Node, dims: List[int]) -> List[int]:
129129

130130

131131
# Capture the effect of permute op on incoming dimension order
132-
def get_permuted_dims(node: torch.fx.Node, dims: Optional[List[int]]) -> List[int]:
132+
def get_permuted_dims(node: torch.fx.Node, dims: Optional[Sequence[int]]) -> List[int]:
133133
"""
134134
Given a permute node, and the incoming dimension ordering of the input
135135
tensor to the permute node, return the net effect of permute op on the
136136
dimension order.
137137
"""
138138
assert node.target == exir_ops.edge.aten.permute_copy.default
139139
# Permute each index of the dimension ordering (dims)
140-
permute_dims = node.args[1]
141-
assert isinstance(permute_dims, List)
140+
# pyre-fixme[6]: This combined typecheck isn't supported yet.
141+
permute_dims: List[int] = list(node.args[1])
142142
assert all(isinstance(x, int) for x in permute_dims)
143143
# If the dims is empty, we can simply return the permute order
144144
if not dims:

backends/cadence/aot/reorder_ops.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -438,9 +438,9 @@ def postpone_dequantize_op(self, graph_module: torch.fx.GraphModule) -> bool:
438438
args=(user, *node.args[1:]),
439439
)
440440
dequant_node.meta = user.meta.copy()
441-
# Remove meta["debug_handle"] on new node. Reassign it at the
442-
# caller level by calling generate_missing_debug_handles
443-
dequant_node.meta.pop("debug_handle")
441+
# Remove meta["debug_handle"] on new node if it exists.
442+
# Reassign it at the caller level by calling generate_missing_debug_handles
443+
dequant_node.meta.pop("debug_handle", None)
444444
user.replace_all_uses_with(dequant_node)
445445
dequant_node.args = (user, *node.args[1:])
446446

backends/cadence/aot/utils.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -235,14 +235,6 @@ def print_ops_info(
235235
)
236236

237237

238-
def model_gm_has_SDPA(model_gm: torch.fx.GraphModule) -> bool:
239-
for node in model_gm.graph.nodes:
240-
if node.op == "call_function":
241-
if node.target == torch.ops.aten.scaled_dot_product_attention.default:
242-
return True
243-
return False
244-
245-
246238
def save_pte_program(
247239
prog: ExecutorchProgramManager, model_name: str, output_dir: str = ""
248240
) -> None:

0 commit comments

Comments
 (0)