Skip to content

Commit 36e26e7

Browse files
authored
Merge branch 'main' into bump-tokenizers-pin-again
2 parents 7dc117e + 668e730 commit 36e26e7

File tree

106 files changed

+2692
-597
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the searchbox below for content that may be hidden.

106 files changed

+2692
-597
lines changed

.ci/scripts/test_wheel_package_qnn.sh

Lines changed: 26 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ PYTHON_VERSION=$1
9898
# Check wheel does NOT contain qualcomm/sdk
9999
# ----------------------------
100100
echo "Checking wheel does not contain qualcomm/sdk..."
101-
SDK_FILES=$(unzip -l "$WHEEL_FILE" | awk '{print $4}' | grep "executorch/backends/qualcomm/sdk" || true)
101+
SDK_FILES=$(unzip -l "$WHEEL_FILE" | awk '{print $4}' | grep -E "executorch/backends/qualcomm/sdk" || true)
102102
if [ -n "$SDK_FILES" ]; then
103103
echo "ERROR: Wheel package contains unexpected qualcomm/sdk files:"
104104
echo "$SDK_FILES"
@@ -111,7 +111,7 @@ fi
111111
# Check .so files in the wheel
112112
# ----------------------------
113113
echo "Checking for .so files inside the wheel..."
114-
WHEEL_SO_FILES=$(unzip -l "$WHEEL_FILE" | awk '{print $4}' | grep "executorch/backends/qualcomm/python" || true)
114+
WHEEL_SO_FILES=$(unzip -l "$WHEEL_FILE" | awk '{print $4}' | grep -E "executorch/backends/qualcomm/python" || true)
115115
if [ -z "$WHEEL_SO_FILES" ]; then
116116
echo "ERROR: No .so files found in wheel under executorch/backends/qualcomm/python"
117117
exit 1
@@ -139,8 +139,30 @@ run_core_tests () {
139139
echo "=== [$LABEL] Installing wheel & deps ==="
140140
"$PIPBIN" install --upgrade pip
141141
"$PIPBIN" install "$WHEEL_FILE"
142-
"$PIPBIN" install torch=="2.9.0.dev20250906" --index-url "https://download.pytorch.org/whl/nightly/cpu"
143-
"$PIPBIN" install --pre torchao --index-url "https://download.pytorch.org/whl/nightly/cpu"
142+
TORCH_VERSION=$(
143+
"$PYBIN" - <<'PY'
144+
import runpy
145+
module_vars = runpy.run_path("torch_pin.py")
146+
print(module_vars["TORCH_VERSION"])
147+
PY
148+
)
149+
150+
NIGHTLY_VERSION=$(
151+
"$PYBIN" - <<'PY'
152+
import runpy
153+
module_vars = runpy.run_path("torch_pin.py")
154+
print(module_vars["NIGHTLY_VERSION"])
155+
PY
156+
)
157+
echo "=== [$LABEL] Install torch==${TORCH_VERSION}.${NIGHTLY_VERSION} ==="
158+
159+
# Install torch based on the pinned PyTorch version
160+
"$PIPBIN" install torch=="${TORCH_VERSION}.${NIGHTLY_VERSION}" --index-url "https://download.pytorch.org/whl/nightly/cpu"
161+
162+
# Install torchao based on the pinned commit from third-party/ao submodule
163+
pushd "$REPO_ROOT/third-party/ao" > /dev/null
164+
USE_CPP=0 "$PYBIN" setup.py develop
165+
popd > /dev/null
144166

145167
echo "=== [$LABEL] Import smoke tests ==="
146168
"$PYBIN" -c "import executorch; print('executorch imported successfully')"

.github/workflows/add-unanswered-to-project.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ jobs:
1212
- name: Add open issues and open, non-draft PRs to org project (excluding certain authors)
1313
uses: actions/github-script@v7
1414
with:
15+
github-token: ${{ secrets.ET_EXT_CONTRIB }}
1516
script: |
1617
const projectId = "PVT_kwDOAUB9vs4A_PUL"; // PyTorch org project 136
1718
const owner = 'pytorch';

CMakeLists.txt

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -729,9 +729,6 @@ endif()
729729

730730
if(EXECUTORCH_BUILD_PYBIND)
731731

732-
# Add codegen tools subdirectory for selective_build pybind module
733-
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/codegen/tools)
734-
735732
if(NOT EXECUTORCH_BUILD_EXTENSION_DATA_LOADER)
736733
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/data_loader)
737734
endif()
@@ -740,6 +737,9 @@ if(EXECUTORCH_BUILD_PYBIND)
740737
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/devtools)
741738
endif()
742739

740+
# Add codegen tools subdirectory for selective_build pybind module
741+
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/codegen/tools)
742+
743743
# Create bundled_module target only for pybindings when bundled_program exists
744744
# This target has hard dependencies on devtools generated headers
745745
if(TARGET bundled_program)

backends/aoti/utils.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,8 @@ inline executorch::aten::ScalarType dtype_to_scalar_type(int32_t dtype) {
3636
switch (dtype) {
3737
case 6: // PyTorch's float32 dtype code
3838
return executorch::aten::ScalarType::Float;
39+
case 15: // PyTorch's bfloat16 dtype code
40+
return executorch::aten::ScalarType::BFloat16;
3941
// Future support for additional dtypes can be added here
4042
default:
4143
ET_LOG(Error, "Unsupported dtype: %d for ScalarType conversion", dtype);

backends/apple/coreml/compiler/coreml_preprocess.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
import logging
77

88
import shutil
9+
import tempfile
910
import uuid
1011
from dataclasses import asdict, dataclass
1112
from enum import Enum
@@ -415,7 +416,7 @@ def preprocess_model(
415416
mlmodel: ct.models.MLModel, model_type: MODEL_TYPE
416417
) -> PreprocessResult:
417418
identifier = "executorch_" + str(uuid.uuid4())
418-
dir_path: Path = Path("tmp") / identifier
419+
dir_path: Path = Path(tempfile.gettempdir()) / identifier
419420
model_dir_path: Path = dir_path / "lowered_module"
420421
model_spec: ct.proto.Model_pb2 = mlmodel.get_spec()
421422
logger.warning(

backends/arm/_passes/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,7 @@
9191
ReplaceScalarWithTensorArgPassTOSABI,
9292
ReplaceScalarWithTensorArgPassTOSAMI,
9393
)
94+
from .rewrite_upsample import RewriteUpsamplePass # noqa
9495
from .scalars_to_attribute_pass import ScalarsToAttributePass # noqa
9596
from .size_adjust_input_pass import SizeAdjustInputPass # noqa
9697
from .to_tosa_memory_format_pass import ToTosaMemoryFormatPass # noqa

backends/arm/_passes/annotate_decomposed_matmul.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,9 @@
1111

1212
import torch
1313
from executorch.backends.arm._passes.arm_pass_utils import create_node
14+
from executorch.backends.arm._passes.fold_qdq_with_annotated_qparams_pass import (
15+
FoldAndAnnotateQParamsPass,
16+
)
1417

1518
from executorch.backends.arm.constants import DQ_OPS, Q_OPS
1619
from executorch.exir.dialects._ops import ops as exir_ops
@@ -29,7 +32,7 @@ class AnnotateDecomposedMatmulPass(ExportPass):
2932
matmul-op (can be mm or bmm).
3033
"""
3134

32-
_passes_required_after: Set[Type[ExportPass]] = set()
35+
_passes_required_after: Set[Type[ExportPass]] = {FoldAndAnnotateQParamsPass}
3336

3437
def _match_partition_to_node(
3538
self, node: torch.fx.Node, partitioned_inputs: List[torch.fx.Node]

backends/arm/_passes/arm_pass_manager.py

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,7 @@
9191
ReplaceScalarWithTensorArgPassTOSABI,
9292
ReplaceScalarWithTensorArgPassTOSAMI,
9393
RetraceFoldedDtypesPass,
94+
RewriteUpsamplePass,
9495
ScalarsToAttributePass,
9596
SizeAdjustInputPass,
9697
ToTosaMemoryFormatPass,
@@ -112,6 +113,8 @@
112113
from executorch.exir.pass_manager import PassManager
113114
from executorch.exir.passes.remove_graph_asserts_pass import RemoveGraphAssertsPass
114115
from torch.fx import GraphModule
116+
from torch.fx.passes.infra.pass_base import PassResult
117+
from torch.nn.modules import Module
115118

116119

117120
class ArmPassManager(PassManager):
@@ -204,6 +207,7 @@ def _tosa_INT_pipeline(self, exported_program: ExportedProgram) -> GraphModule:
204207
# needs to happen before AddBiasPass, but after the table ops are inserted
205208
# to be able to validate that conv2d has right dtype arguments.
206209
self.add_pass(DecomposeConv2dWithInt16ActivationPass())
210+
self.add_pass(RewriteUpsamplePass(exported_program))
207211
self.add_pass(AddBiasPass(exported_program))
208212

209213
self.add_pass(FuseEqualPlaceholdersPass(exported_program))
@@ -288,6 +292,7 @@ def _tosa_FP_pipeline(self, exported_program: ExportedProgram) -> GraphModule:
288292
self.add_pass(FuseViewCopyTransform())
289293
self.add_pass(FuseConstantArgsPass(exported_program))
290294
self.add_pass(CastInt64BuffersToInt32Pass(exported_program))
295+
self.add_pass(RewriteUpsamplePass(exported_program))
291296
self.add_pass(AddBiasPass(exported_program))
292297
self.add_pass(InsertTableOpsPass(exported_program))
293298
self.add_pass(FuseEqualPlaceholdersPass(exported_program))
@@ -355,3 +360,20 @@ def transform_for_annotation_pipeline(self, graph_module: GraphModule):
355360
self.add_pass(DecomposeMaskedFill())
356361

357362
return self._transform(graph_module)
363+
364+
def __call__(self, module: Module) -> PassResult:
365+
try:
366+
return super().__call__(module)
367+
except Exception as e:
368+
first_exception = e.__cause__ or e.__context__ or e
369+
import re
370+
371+
message = e.args[0]
372+
m = re.search(r"An error occurred when running the '([^']+)' pass", message)
373+
if m:
374+
pass_name = m.group(1)
375+
first_exception.args = (
376+
f"{pass_name}: {first_exception.args[0]}",
377+
*first_exception.args[1:],
378+
)
379+
raise first_exception

backends/arm/_passes/conv1d_unsqueeze_pass.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,9 @@
88

99
from typing import Set, Type
1010

11+
from executorch.backends.arm._passes.add_bias_pass import AddBiasPass
12+
from executorch.backends.arm._passes.size_adjust_input_pass import SizeAdjustInputPass
13+
1114
from executorch.exir.dialects._ops import ops as exir_ops
1215
from executorch.exir.pass_base import ExportPass
1316

@@ -23,7 +26,7 @@ class Conv1dUnsqueezePass(ExportPass):
2326
3) squeeze the output back down to 3d.
2427
"""
2528

26-
_passes_required_after: Set[Type[ExportPass]] = set()
29+
_passes_required_after: Set[Type[ExportPass]] = {AddBiasPass, SizeAdjustInputPass}
2730

2831
def call_operator(self, op, args, kwargs, meta):
2932
if op != exir_ops.edge.aten.convolution.default:

backends/arm/_passes/convert_any_default_dim_dims_pass.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@
66
from typing import Set, Type
77

88
import torch
9+
from executorch.backends.arm._passes.convert_squeezes_to_view import (
10+
ConvertSqueezesToViewPass,
11+
)
912
from executorch.exir.dialects._ops import ( # type: ignore[import-not-found]
1013
ops as exir_ops,
1114
)
@@ -46,7 +49,7 @@ class ConvertAnyDefaultDimDimsPass(ExportPass):
4649
squeeze(dim = [dim1, dim2])
4750
"""
4851

49-
_passes_required_after: Set[Type[ExportPass]] = set()
52+
_passes_required_after: Set[Type[ExportPass]] = {ConvertSqueezesToViewPass}
5053

5154
def call(self, graph_module: torch.fx.GraphModule):
5255
modified = False

0 commit comments

Comments
 (0)