Skip to content

Commit 07e9451

Browse files
Merge branch 'main' into gh/manuelcandales/128/orig
2 parents a7b5cf5 + f888bdf commit 07e9451

File tree

86 files changed

+1036
-447
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

86 files changed

+1036
-447
lines changed
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
5616fa4a68718ead203314a3467f7dd9547153ae
1+
9b498d3bb28b8e3411ce464dd2755c5b96d92c8f

.ci/docker/common/install_conda.sh

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
1313
install_miniconda() {
1414
BASE_URL="https://repo.anaconda.com/miniconda"
1515
CONDA_FILE="Miniconda3-py${PYTHON_VERSION//./}_${MINICONDA_VERSION}-Linux-x86_64.sh"
16-
if [[ $(uname -m) == "aarch64" ]]; then
16+
if [[ $(uname -m) == "aarch64" ]]; then
1717
CONDA_FILE="Miniconda3-py${PYTHON_VERSION//./}_${MINICONDA_VERSION}-Linux-aarch64.sh"
1818
fi
1919

@@ -71,4 +71,8 @@ fix_conda_ubuntu_libstdcxx() {
7171
install_miniconda
7272
install_python
7373
install_pip_dependencies
74-
fix_conda_ubuntu_libstdcxx
74+
# Hack breaks the job on aarch64 but is still necessary everywhere
75+
# else.
76+
if [ "$(uname -m)" != "aarch64" ]; then
77+
fix_conda_ubuntu_libstdcxx
78+
fi

.ci/docker/conda-env-ci.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
cmake=3.26.4
1+
cmake=3.31.2
22
ninja=1.10.2
33
libuv
44
llvm-openmp

.ci/scripts/check_c10_sync.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,3 +12,4 @@ pushd pytorch
1212
git checkout "$pytorch_pin"
1313
popd
1414
"$(dirname "${BASH_SOURCE[0]}")"/compare_dirs.sh runtime/core/portable_type/c10/c10 pytorch/c10
15+
"$(dirname "${BASH_SOURCE[0]}")"/compare_dirs.sh runtime/core/portable_type/c10/torch/standalone pytorch/torch/standalone

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -705,7 +705,7 @@ if(EXECUTORCH_BUILD_EXECUTOR_RUNNER)
705705
endif()
706706

707707
add_executable(executor_runner ${_executor_runner__srcs})
708-
if(CMAKE_BUILD_TYPE STREQUAL "Release")
708+
if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
709709
if(APPLE)
710710
target_link_options(executor_runner PRIVATE "LINKER:-dead_strip")
711711
else()

backends/arm/_passes/decompose_sqrt_pass.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@
44
# LICENSE file in the root directory of this source tree.
55

66
# pyre-unsafe
7+
from typing import Tuple, Union
8+
79
import torch
810
from executorch.exir.dialects._ops import ops as exir_ops
911
from executorch.exir.pass_base import ExportPass
@@ -15,7 +17,7 @@
1517
)
1618

1719

18-
def get_sqrt_decomposition(op) -> tuple:
20+
def get_sqrt_decomposition(op) -> Union[Tuple, torch._ops.OpOverload]:
1921
# TODO : "MLETORCH-863 : Replace current sqrt -> pow.Tensor_Scalar workaround with pow.Tensor_Tensor"
2022
if op in edge_sqrt_ops:
2123
return exir_ops.edge.aten.pow.Tensor_Scalar

backends/arm/_passes/replace_scalar_with_tensor_pass.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
# pyre-unsafe
77

88

9-
from typing import Dict
9+
from typing import Dict, Union
1010

1111
import torch
1212
from executorch.backends.transforms.replace_scalar_with_tensor import (
@@ -18,7 +18,10 @@
1818

1919

2020
# Operators that are included for both TOSA profiles
21-
_common_ops: Dict[EdgeOpOverload, EdgeOpOverload] = {
21+
_common_ops: Dict[
22+
Union[EdgeOpOverload, torch._ops.OpOverload],
23+
Union[EdgeOpOverload, torch._ops.OpOverload],
24+
] = {
2225
exir_ops.edge.aten.add.Scalar: exir_ops.edge.aten.add.Tensor,
2326
exir_ops.edge.aten.sub.Scalar: exir_ops.edge.aten.sub.Tensor,
2427
exir_ops.edge.aten.mul.Scalar: exir_ops.edge.aten.mul.Tensor,

backends/arm/operators/op_index_tensor.py

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -189,11 +189,16 @@ def define_node(
189189
if i == 0:
190190
gather_index_name = reshaped_idxs.name
191191
else:
192+
add_idxs = tosa_graph.addIntermediate(
193+
reshaped_idxs.shape,
194+
reshaped_idxs.dtype,
195+
)
192196
tosa_graph.addOperator(
193197
ts.TosaOp.Op().ADD,
194198
[gather_index_name, reshaped_idxs.name],
195-
[gather_index_name],
199+
[add_idxs.name],
196200
)
201+
gather_index_name = add_idxs.name
197202

198203
gather_vals_shape = [N, K, C]
199204
reshaped_input = tosa_graph.addIntermediate(gather_vals_shape, values.dtype)
@@ -314,11 +319,16 @@ def define_node(
314319
if i == 0:
315320
gather_index_name = reshaped_idxs.name
316321
else:
322+
add_idxs = tosa_graph.addIntermediate(
323+
reshaped_idxs.shape,
324+
reshaped_idxs.dtype,
325+
)
317326
tosa_graph.addOperator(
318327
ts.TosaOp.Op().ADD,
319328
[gather_index_name, reshaped_idxs.name],
320-
[gather_index_name],
329+
[add_idxs.name],
321330
)
331+
gather_index_name = add_idxs.name
322332

323333
gather_vals_shape = [N, K, C]
324334
reshaped_input = tosa_graph.addIntermediate(gather_vals_shape, values.dtype)

backends/arm/test/test_model.py

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ def get_args():
6464
parser.add_argument(
6565
"--timeout",
6666
required=False,
67-
default=60 * 10,
67+
default=60 * 20,
6868
help="Timeout in seconds used when running the model",
6969
)
7070
args = parser.parse_args()
@@ -165,11 +165,6 @@ def build_ethosu_runtime(
165165
extra_flags: str,
166166
elf_build_path: str,
167167
):
168-
169-
extra_build_flag = ""
170-
if extra_flags:
171-
extra_build_flag = f"--extra_build_flags={extra_flags}"
172-
173168
run_external_cmd(
174169
[
175170
"bash",
@@ -182,7 +177,7 @@ def build_ethosu_runtime(
182177
"--build_type=Release",
183178
f"--system_config={system_config}",
184179
f"--memory_mode={memory_mode}",
185-
extra_build_flag,
180+
f"--extra_build_flags=-DET_DUMP_OUTPUT=OFF {extra_flags}",
186181
f"--output={elf_build_path}",
187182
]
188183
)

backends/cadence/aot/memory_constraints.py

Lines changed: 21 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
import typing
1212
from collections import defaultdict
1313
from dataclasses import dataclass
14-
from typing import cast, DefaultDict, Iterable, Optional, Sequence
14+
from typing import Callable, cast, DefaultDict, Iterable, Optional, Sequence, TypeAlias
1515

1616
import torch
1717
import torch.fx
@@ -573,23 +573,34 @@ def compute_slice_and_select_loc_constraints(
573573
graph_module.recompile()
574574

575575

576+
ConstraintsGenPass: TypeAlias = Callable[
577+
[MemConstraints],
578+
Callable[[torch.fx.GraphModule], Optional[PassResult]],
579+
]
580+
581+
576582
# The class to generate all the constraints that will be passed on to the memory
577583
# planning algorithm.
578584
class GenerateMemConstraints:
579585
def __init__(
580586
self,
581587
mem_constraints: MemConstraints,
582-
additional_constraint_gen_passes: list | None = None,
588+
additional_constraint_gen_passes: Sequence[ConstraintsGenPass] | None = None,
583589
) -> None:
584-
self.mem_constraints = mem_constraints
585-
self.additional_constraint_gen_passes = additional_constraint_gen_passes or []
590+
self.mem_constraints: MemConstraints = mem_constraints
591+
self.additional_constraint_gen_passes: Sequence[ConstraintsGenPass] = (
592+
additional_constraint_gen_passes or []
593+
)
586594

587595
def __call__(self, graph_module: torch.fx.GraphModule) -> PassResult:
588-
constraint_gen_passes: list = [
589-
GenerateMemoryViewConstraints,
590-
GenerateSliceAndSelectNopConstraints,
591-
GenerateCatNopConstraints,
592-
] + self.additional_constraint_gen_passes
596+
constraint_gen_passes: Sequence[ConstraintsGenPass] = cast(
597+
list[ConstraintsGenPass],
598+
[
599+
GenerateMemoryViewConstraints,
600+
GenerateSliceAndSelectNopConstraints,
601+
GenerateCatNopConstraints,
602+
],
603+
) + list(self.additional_constraint_gen_passes)
593604
# Create a filter using the opt level in mem_constraints, and filter
594605
# the relevant passes.
595606
pass_filter = create_cadence_pass_filter(self.mem_constraints.opt_level)
@@ -602,6 +613,7 @@ def __call__(self, graph_module: torch.fx.GraphModule) -> PassResult:
602613
typing.Callable[[torch.fx.GraphModule], Optional[PassResult]],
603614
]
604615
],
616+
# pyre-ignore[6]: Incompatible parameter type.
605617
list(filter(pass_filter, constraint_gen_passes)),
606618
)
607619
]

0 commit comments

Comments (0)