Skip to content

Commit cc64158

Browse files
authored
Merge branch 'main' into refactor/samsung_backend_schema
2 parents 816fac8 + 668e730 commit cc64158

File tree

57 files changed

+529
-245
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

57 files changed

+529
-245
lines changed

.github/workflows/add-unanswered-to-project.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ jobs:
1212
- name: Add open issues and open, non-draft PRs to org project (excluding certain authors)
1313
uses: actions/github-script@v7
1414
with:
15+
github-token: ${{ secrets.ET_EXT_CONTRIB }}
1516
script: |
1617
const projectId = "PVT_kwDOAUB9vs4A_PUL"; // PyTorch org project 136
1718
const owner = 'pytorch';

backends/arm/_passes/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,7 @@
9191
ReplaceScalarWithTensorArgPassTOSABI,
9292
ReplaceScalarWithTensorArgPassTOSAMI,
9393
)
94+
from .rewrite_upsample import RewriteUpsamplePass # noqa
9495
from .scalars_to_attribute_pass import ScalarsToAttributePass # noqa
9596
from .size_adjust_input_pass import SizeAdjustInputPass # noqa
9697
from .to_tosa_memory_format_pass import ToTosaMemoryFormatPass # noqa

backends/arm/_passes/annotate_decomposed_matmul.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,9 @@
1111

1212
import torch
1313
from executorch.backends.arm._passes.arm_pass_utils import create_node
14+
from executorch.backends.arm._passes.fold_qdq_with_annotated_qparams_pass import (
15+
FoldAndAnnotateQParamsPass,
16+
)
1417

1518
from executorch.backends.arm.constants import DQ_OPS, Q_OPS
1619
from executorch.exir.dialects._ops import ops as exir_ops
@@ -29,7 +32,7 @@ class AnnotateDecomposedMatmulPass(ExportPass):
2932
matmul-op (can be mm or bmm).
3033
"""
3134

32-
_passes_required_after: Set[Type[ExportPass]] = set()
35+
_passes_required_after: Set[Type[ExportPass]] = {FoldAndAnnotateQParamsPass}
3336

3437
def _match_partition_to_node(
3538
self, node: torch.fx.Node, partitioned_inputs: List[torch.fx.Node]

backends/arm/_passes/arm_pass_manager.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,7 @@
9191
ReplaceScalarWithTensorArgPassTOSABI,
9292
ReplaceScalarWithTensorArgPassTOSAMI,
9393
RetraceFoldedDtypesPass,
94+
RewriteUpsamplePass,
9495
ScalarsToAttributePass,
9596
SizeAdjustInputPass,
9697
ToTosaMemoryFormatPass,
@@ -206,6 +207,7 @@ def _tosa_INT_pipeline(self, exported_program: ExportedProgram) -> GraphModule:
206207
# needs to happen before AddBiasPass, but after the table ops are inserted
207208
# to be able to validate that conv2d has right dtype arguments.
208209
self.add_pass(DecomposeConv2dWithInt16ActivationPass())
210+
self.add_pass(RewriteUpsamplePass(exported_program))
209211
self.add_pass(AddBiasPass(exported_program))
210212

211213
self.add_pass(FuseEqualPlaceholdersPass(exported_program))
@@ -290,6 +292,7 @@ def _tosa_FP_pipeline(self, exported_program: ExportedProgram) -> GraphModule:
290292
self.add_pass(FuseViewCopyTransform())
291293
self.add_pass(FuseConstantArgsPass(exported_program))
292294
self.add_pass(CastInt64BuffersToInt32Pass(exported_program))
295+
self.add_pass(RewriteUpsamplePass(exported_program))
293296
self.add_pass(AddBiasPass(exported_program))
294297
self.add_pass(InsertTableOpsPass(exported_program))
295298
self.add_pass(FuseEqualPlaceholdersPass(exported_program))

backends/arm/_passes/conv1d_unsqueeze_pass.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,9 @@
88

99
from typing import Set, Type
1010

11+
from executorch.backends.arm._passes.add_bias_pass import AddBiasPass
12+
from executorch.backends.arm._passes.size_adjust_input_pass import SizeAdjustInputPass
13+
1114
from executorch.exir.dialects._ops import ops as exir_ops
1215
from executorch.exir.pass_base import ExportPass
1316

@@ -23,7 +26,7 @@ class Conv1dUnsqueezePass(ExportPass):
2326
3) squeeze the output back down to 3d.
2427
"""
2528

26-
_passes_required_after: Set[Type[ExportPass]] = set()
29+
_passes_required_after: Set[Type[ExportPass]] = {AddBiasPass, SizeAdjustInputPass}
2730

2831
def call_operator(self, op, args, kwargs, meta):
2932
if op != exir_ops.edge.aten.convolution.default:

backends/arm/_passes/convert_any_default_dim_dims_pass.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@
66
from typing import Set, Type
77

88
import torch
9+
from executorch.backends.arm._passes.convert_squeezes_to_view import (
10+
ConvertSqueezesToViewPass,
11+
)
912
from executorch.exir.dialects._ops import ( # type: ignore[import-not-found]
1013
ops as exir_ops,
1114
)
@@ -46,7 +49,7 @@ class ConvertAnyDefaultDimDimsPass(ExportPass):
4649
squeeze(dim = [dim1, dim2])
4750
"""
4851

49-
_passes_required_after: Set[Type[ExportPass]] = set()
52+
_passes_required_after: Set[Type[ExportPass]] = {ConvertSqueezesToViewPass}
5053

5154
def call(self, graph_module: torch.fx.GraphModule):
5255
modified = False

backends/arm/_passes/convert_expand_copy_to_repeat.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,9 @@
1010

1111
import torch
1212

13+
from executorch.backends.arm._passes.unsqueeze_before_repeat_pass import (
14+
UnsqueezeBeforeRepeatPass,
15+
)
1316
from executorch.exir.dialects._ops import ops as exir_ops
1417
from executorch.exir.pass_base import ExportPass
1518

@@ -50,7 +53,7 @@ class ConvertExpandCopyToRepeatPass(ExportPass):
5053
Replace expand copy with repeat since it is a repeat that can only repeat singleton dimensions.
5154
"""
5255

53-
_passes_required_after: Set[Type[ExportPass]] = set()
56+
_passes_required_after: Set[Type[ExportPass]] = {UnsqueezeBeforeRepeatPass}
5457

5558
expand_copy = exir_ops.edge.aten.expand_copy.default
5659
repeat = exir_ops.edge.aten.repeat.default

backends/arm/_passes/convert_full_like_to_full_pass.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,14 @@
55

66
from typing import Set, Type
77

8+
from executorch.backends.arm._passes.arm_pass import ArmPass
9+
from executorch.backends.arm._passes.fuse_constant_ops_pass import ComputeConstantOpsAOT
10+
811
from executorch.exir.dialects._ops import ops as exir_ops
912
from executorch.exir.pass_base import ExportPass
1013

1114

12-
class ConvertFullLikeToFullPass(ExportPass):
15+
class ConvertFullLikeToFullPass(ArmPass):
1316
"""As per the full_like pytorch documentation,
1417
`torch.full_like(input, fill_value)` is equivalent to
1518
`torch.full(input.size(),
@@ -21,7 +24,7 @@ class ConvertFullLikeToFullPass(ExportPass):
2124
Skip layout and device since it's not relevant for our backend.
2225
"""
2326

24-
_passes_required_after: Set[Type[ExportPass]] = set()
27+
_passes_required_after: Set[Type[ExportPass]] = {ComputeConstantOpsAOT}
2528

2629
def call_operator(self, op, args, kwargs, meta):
2730
if op not in [

backends/arm/_passes/convert_int64_const_ops_to_int32.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ class ConvertInt64ConstOpsToInt32Pass(ExportPass):
3131
5. `torch.tensor`
3232
"""
3333

34-
_passes_required_after: Set[Type[ExportPass]] = set()
34+
_passes_required_after: Set[Type[ExportPass]] = {ComputeConstantOpsAOT}
3535

3636
torch_ops = [
3737
torch.ops.aten.full.default,

backends/arm/_passes/convert_minmax_pass.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@
66
from typing import Set, Type
77

88
import torch
9+
from executorch.backends.arm._passes.convert_squeezes_to_view import (
10+
ConvertSqueezesToViewPass,
11+
)
912
from executorch.exir.dialects._ops import ops as exir_ops
1013
from executorch.exir.pass_base import ExportPass, PassResult
1114

@@ -31,7 +34,7 @@ class ConvertMinMaxPass(ExportPass):
3134
squeeze(dim = [dim1, dim2])
3235
"""
3336

34-
_passes_required_after: Set[Type[ExportPass]] = set()
37+
_passes_required_after: Set[Type[ExportPass]] = {ConvertSqueezesToViewPass}
3538

3639
def check_argmax(self, node):
3740
"""

0 commit comments

Comments
 (0)