
Commit 281823a

Merge remote-tracking branch 'origin/main' into fix-buck-llm-runner

2 parents e3970af + 94f62b7

20 files changed, +339 −22 lines changed

.ci/scripts/test_backend_linux.sh
Lines changed: 6 additions & 1 deletion

@@ -39,12 +39,17 @@ if [[ "$FLOW" == *qnn* ]]; then
 fi
 
 if [[ "$FLOW" == *vulkan* ]]; then
-  # Setup swiftshader and Vulkan SDK which are required to build the Vulkan delegate
+  # Setup swiftshader and Vulkan SDK which are required to build the Vulkan delegate.
   source .ci/scripts/setup-vulkan-linux-deps.sh
 
   EXTRA_BUILD_ARGS+=" -DEXECUTORCH_BUILD_VULKAN=ON"
 fi
 
+if [[ "$FLOW" == *arm* ]]; then
+  # Setup ARM deps.
+  .ci/scripts/setup-arm-baremetal-tools.sh
+fi
+
 # We need the runner to test the built library.
 PYTHON_EXECUTABLE=python CMAKE_ARGS="$EXTRA_BUILD_ARGS" .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release --editable true

.ci/scripts/test_llava.sh
Lines changed: 1 addition & 1 deletion

@@ -149,7 +149,7 @@ run_and_verify() {
 
   # verify result.txt
   RESULT=$(cat result.txt)
-  EXPECTED_PREFIX="ASSISTANT: image captures a basketball game in progress, with"
+  EXPECTED_PREFIX="ASSISTANT: The image captures a basketball game in progress, with"
 
   if [[ "${RESULT}" == *"${EXPECTED_PREFIX}"* ]]; then
     echo "Expected result prefix: ${EXPECTED_PREFIX}"

.github/workflows/_test_backend.yml
Lines changed: 6 additions & 1 deletion

@@ -31,6 +31,11 @@ on:
       required: false
       type: boolean
       default: false
+    runner-linux:
+      description: 'Runner type for Linux jobs'
+      required: false
+      type: string
+      default: linux.4xlarge.memory
 
 jobs:
   test-backend-linux:
@@ -44,7 +49,7 @@ jobs:
     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
     with:
      ref: ${{ inputs.ref }}
-      runner: linux.4xlarge.memory
+      runner: ${{ inputs.runner-linux }}
      docker-image: ci-image:executorch-ubuntu-22.04-clang12
      submodules: recursive
      timeout: ${{ inputs.timeout }}
.github/workflows/test-backend-arm.yml (new file)
Lines changed: 27 additions & 0 deletions

@@ -0,0 +1,27 @@
+name: Test ARM Backend
+
+on:
+  schedule:
+    - cron: 0 2 * * *
+  push:
+    tags:
+      - ciflow/nightly/*
+  pull_request:
+    paths:
+      - .github/workflows/test-backend-arm.yml
+      - .github/workflows/_test_backend.yml
+  workflow_dispatch:
+
+concurrency:
+  group: ${{ github.workflow }}--${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
+  cancel-in-progress: true
+
+jobs:
+  test-arm:
+    uses: ./.github/workflows/_test_backend.yml
+    with:
+      backend: arm
+      flows: '["arm_tosa"]'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 120
+      run-linux: true

.github/workflows/test-backend-qnn.yml
Lines changed: 1 addition & 0 deletions

@@ -25,3 +25,4 @@ jobs:
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      timeout: 120
      run-linux: true
+      runner-linux: linux.8xlarge.memory

backends/arm/test/tester/arm_tester.py
Lines changed: 14 additions & 2 deletions

@@ -57,6 +57,7 @@
 
 from executorch.backends.arm.vgf import VgfCompileSpec, VgfPartitioner
 
+from executorch.backends.test.harness.error_statistics import ErrorStatistics
 from executorch.backends.test.harness.stages import Stage, StageType
 from executorch.backends.xnnpack.test.tester import Tester
 from executorch.devtools.backend_debug import get_delegation_info
@@ -333,6 +334,7 @@ def to_edge_transform_and_lower(
         transform_passes: Optional[
             Union[Sequence[PassType], Dict[str, Sequence[PassType]]]
         ] = None,
+        generate_etrecord: bool = False,
     ):
         if transform_passes is not None:
             raise RuntimeError(
@@ -367,7 +369,9 @@
             to_edge_and_lower_stage.partitioners = partitioners
         if edge_compile_config is not None:
             to_edge_and_lower_stage.edge_compile_conf = edge_compile_config
-        return super().to_edge_transform_and_lower(to_edge_and_lower_stage)
+        return super().to_edge_transform_and_lower(
+            to_edge_and_lower_stage, generate_etrecord=generate_etrecord
+        )
 
     def to_executorch(self, to_executorch_stage: Optional[ToExecutorch] | None = None):
         if to_executorch_stage is None:
@@ -402,6 +406,7 @@ def run_method_and_compare_outputs(
         qtol=0,
         error_callbacks=None,
         run_eager_mode=False,
+        statistics_callback: Callable[[ErrorStatistics], None] | None = None,
     ):
         """
         Compares the run_artifact output of 'stage' with the output of a reference stage.
@@ -657,10 +662,17 @@ def _compare_outputs(
         rtol=1e-03,
         qtol=0,
         error_callbacks=None,
+        statistics_callback: Callable[[ErrorStatistics], None] | None = None,
     ):
         try:
             super()._compare_outputs(
-                reference_output, stage_output, quantization_scale, atol, rtol, qtol
+                reference_output,
+                stage_output,
+                quantization_scale,
+                atol,
+                rtol,
+                qtol,
+                statistics_callback=statistics_callback,
             )
         except AssertionError as e:
             if error_callbacks is None:
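For orientation, here is a minimal sketch of how the new `generate_etrecord` flag and the `statistics_callback` hook could be used together. The `run_with_error_stats` helper and the already-prepared `tester` argument are hypothetical and not part of this diff; only the two keyword arguments come from the change above.

from typing import List

from executorch.backends.test.harness.error_statistics import ErrorStatistics


def run_with_error_stats(tester) -> List[ErrorStatistics]:
    # Hypothetical helper: `tester` is assumed to be an ArmTester that has already
    # run its quantize/export stages, as in the existing Arm test suite.
    collected: List[ErrorStatistics] = []
    (
        tester.to_edge_transform_and_lower(generate_etrecord=True)  # new flag from this diff
        .to_executorch()
        .run_method_and_compare_outputs(statistics_callback=collected.append)  # new hook
    )
    return collected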

backends/cadence/aot/ops_registrations.py
Lines changed: 39 additions & 0 deletions

@@ -324,6 +324,19 @@
     "rope.out(Tensor input, Tensor sin_tensor, Tensor cos_tensor, Tensor? pos, *, Tensor(a!) out) -> Tensor(a!)"
 )
 
+lib.define(
+    "quantized_softmax(Tensor input, Tensor mask, int dim, Tensor in_scale, Tensor in_zero_point, Tensor out_scale, Tensor out_zero_point) -> (Tensor out)"
+)
+lib.define(
+    "quantized_softmax.per_tensor(Tensor input, Tensor mask, int dim, float in_scale, int in_zero_point, float out_scale, int out_zero_point) -> (Tensor out)"
+)
+lib.define(
+    "quantized_softmax.out(Tensor input, Tensor mask, int dim, Tensor in_scale, Tensor in_zero_point, Tensor out_scale, Tensor out_zero_point, *, Tensor(a!) out) -> Tensor (a!)"
+)
+lib.define(
+    "quantized_softmax.per_tensor_out(Tensor input, Tensor mask, int dim, float in_scale, int in_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor (a!)"
+)
+
 # Load/store with iDMA. These only exist before memory planning.
 # Post memory planning, we check that outputs/inputs for the load/store are in
 # DTCM and replace idma_load/idma_store with idma_copy.
@@ -2329,3 +2342,29 @@ def softmax_f32_f32_meta(
     half_to_float: Optional[bool] = None,
 ) -> torch.Tensor:
     return self.new_empty(self.size(), dtype=self.dtype)
+
+
+@register_fake("cadence::quantized_softmax")
+def quantized_softmax_meta(
+    input: torch.Tensor,
+    mask: torch.Tensor,
+    dim: int,
+    in_scale: torch.Tensor,
+    in_zero_point: torch.Tensor,
+    out_scale: torch.Tensor,
+    out_zero_point: torch.Tensor,
+) -> torch.Tensor:
+    return input.new_empty(input.size(), dtype=input.dtype)
+
+
+@register_fake("cadence::quantized_softmax.per_tensor")
+def quantized_softmax_per_tensor_meta(
+    input: torch.Tensor,
+    mask: torch.Tensor,
+    dim: int,
+    in_scale: float,
+    in_zero_point: int,
+    out_scale: float,
+    out_zero_point: int,
+) -> torch.Tensor:
+    return input.new_empty(input.size(), dtype=input.dtype)
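A quick way to sanity-check the new fake kernels is to invoke the op under FakeTensorMode, which exercises only shape and dtype propagation. This is a sketch with made-up shapes and quantization parameters; it assumes the ops_registrations module above has been imported so the cadence::quantized_softmax registrations exist.

import torch
from torch._subclasses.fake_tensor import FakeTensorMode

# Assumes `executorch.backends.cadence.aot.ops_registrations` has been imported,
# so torch.ops.cadence.quantized_softmax.per_tensor and its fake kernel are registered.
with FakeTensorMode():
    x = torch.empty(1, 4, 64, dtype=torch.int8)
    mask = torch.empty(1, 4, 4, dtype=torch.int32)  # last dim // 16, mirroring the fusion pass
    out = torch.ops.cadence.quantized_softmax.per_tensor(x, mask, -1, 0.05, 0, 0.05, 0)
    # The fake kernel returns input.new_empty(...), so shape and dtype match the input.
    assert out.shape == x.shape and out.dtype == x.dtype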

backends/cadence/aot/quantizer/fusion_pass.py
Lines changed: 78 additions & 1 deletion

@@ -6,9 +6,10 @@
 
 # pyre-strict
 
-from typing import Any, Dict, List, Tuple
+from typing import Any, cast, Dict, List, Tuple
 
 import torch
+from executorch.backends.cadence.aot.compiler_utils import get_shape
 from executorch.backends.cadence.aot.quantizer.patterns import (
     AddmmPattern,
     AddPattern,
@@ -25,6 +26,7 @@
     MatmulPattern,
     ReluPattern0,
     ReluPattern1,
+    SoftmaxPattern,
 )
 from executorch.backends.cadence.aot.quantizer.utils import (
     check_out_zero_point_is_min_range,
@@ -388,6 +390,73 @@ def get_args_and_kwargs_relu(
     return args, kwargs
 
 
+def get_args_and_kwargs_softmax(
+    graph_module: GraphModule,
+    inputs_inputs: List[fx.Node],
+    dequants_inputs: List[fx.Node],
+    quant_node: fx.Node,
+    op_node: fx.Node,
+) -> Tuple[Tuple[ArgsType, ...], Dict[str, ArgsType]]:
+    # Make a dummy mask tensor
+    mask_shape = get_shape(graph_module, cast(fx.Node, quant_node.args[0]))
+    mask_shape = list(mask_shape) if mask_shape else []
+    mask_shape[-1] = mask_shape[-1] // 16
+    mask_tensor = graph_module.graph.call_function(
+        torch.ops.aten.full.default,
+        (
+            mask_shape,
+            0.0,
+        ),
+        {"dtype": torch.int32},
+    )
+    # Make the scale and zero_point tensors
+    in_scale_tensor = graph_module.graph.call_function(
+        torch.ops.aten.full.default,
+        (
+            [1],
+            dequants_inputs[0].args[1],
+        ),
+        {"dtype": torch.float32},
+    )
+    in_zero_point_tensor = graph_module.graph.call_function(
+        torch.ops.aten.full.default,
+        (
+            [1],
+            dequants_inputs[0].args[2],
+        ),
+        {"dtype": torch.int32},
+    )
+    out_scale_tensor = graph_module.graph.call_function(
+        torch.ops.aten.full.default,
+        (
+            [1],
+            quant_node.args[1],
+        ),
+        {"dtype": torch.float32},
+    )
+    out_zero_point_tensor = graph_module.graph.call_function(
+        torch.ops.aten.full.default,
+        (
+            [1],
+            quant_node.args[2],
+        ),
+        {"dtype": torch.int32},
+    )
+
+    # Make the args and kwargs for the replacement op
+    args = (
+        inputs_inputs[0],
+        mask_tensor,
+        op_node.args[1],
+        in_scale_tensor,
+        in_zero_point_tensor,
+        out_scale_tensor,
+        out_zero_point_tensor,
+    )
+    kwargs = {}
+    return args, kwargs
+
+
 class QuantFusion(ExportPass):
     # pyre-ignore[2]: Parameter `patterns` has no type specified
     def __init__(self, patterns) -> None:
@@ -543,6 +612,14 @@ def call(self, graph_module: fx.GraphModule) -> PassResult:  # noqa: C901
                    dequants_inputs,
                    quant_node,
                )
+            elif isinstance(pattern, SoftmaxPattern):
+                args, kwargs = get_args_and_kwargs_softmax(
+                    graph_module,
+                    inputs_inputs,
+                    dequants_inputs,
+                    quant_node,
+                    anchor_output_node,
+                )
             fused = graph_module.graph.call_function(
                 pattern.replacement_op(),
                 args,

backends/cadence/aot/quantizer/patterns.py
Lines changed: 22 additions & 0 deletions

@@ -485,3 +485,25 @@ def partition_types(self) -> List[OpOverload]:
 class Conv2dReluPattern1(ConvReluBasePattern):
     def partition_types(self) -> List[OpOverload]:
         return [torch.ops.aten.conv2d.default, torch.ops.aten.relu_.default]
+
+
+class SoftmaxPattern(QuantizationPattern):
+
+    def partition_types(self) -> List[OpOverload]:
+        return [torch.ops.aten._softmax.default]
+
+    def get_anchors(
+        self, gm: fx.GraphModule, fused_partition: List[fx.GraphModule]
+    ) -> PartitionAnchors:
+        # pyre-fixme[29]: `Union[BoundMethod[typing.Callable(torch._C.TensorBase.__ge...
+        softmax_node = fused_partition[0].nodes[-1]
+
+        return PartitionAnchors(
+            inputs=[(softmax_node, 0)],
+            weights=[],
+            biases=[],
+            output=[(softmax_node,)],
+        )
+
+    def replacement_op(self) -> OpOverload:
+        return torch.ops.cadence.quantized_softmax.default

backends/cadence/aot/quantizer/quantizer.py
Lines changed: 29 additions & 0 deletions

@@ -27,6 +27,7 @@
     QuantizationPattern,
     ReluPattern0,
     ReluPattern1,
+    SoftmaxPattern,
 )
 from executorch.backends.cadence.aot.quantizer.utils import (
     find_sequential_partitions_aten,
@@ -58,6 +59,15 @@
     observer_or_fake_quant_ctr=HistogramObserver.with_args(eps=2**-12),
 )
 
+act_qspec_asym16s = QuantizationSpec(
+    dtype=torch.int16,
+    quant_min=-32768,
+    quant_max=32767,
+    qscheme=torch.per_tensor_affine,
+    is_dynamic=False,
+    observer_or_fake_quant_ctr=HistogramObserver.with_args(eps=2**-12),
+)
+
 wgt_qspec_asym8s = QuantizationSpec(
     dtype=torch.int8,
     quant_min=-128,
@@ -92,6 +102,13 @@
     None,
 )
 
+qconfig_A16 = QuantizationConfig(
+    act_qspec_asym16s,
+    act_qspec_asym16s,
+    wgt_qspec_asym8s,
+    None,
+)
+
 
 class CadenceAtenQuantizer(Quantizer):
     def __init__(
@@ -283,3 +300,15 @@ def __init__(self, quantizers: Optional[list[Quantizer]] = None) -> None:
         quantizers.append(CadenceAtenQuantizer(AddPattern(), qconfig_A8W8))
         quantizers.append(CadenceAtenQuantizer(CatPattern(), qconfig_A8W8))
         super().__init__(quantizers)
+
+
+class CadenceWithSoftmaxQuantizer(CadenceQuantizer):
+    """
+    Quantizer including A16 softmax
+    """
+
+    def __init__(self, quantizers: Optional[list[Quantizer]] = None) -> None:
+        if quantizers is None:
+            quantizers = get_cadence_default_quantizers()
+        quantizers.append(CadenceAtenQuantizer(SoftmaxPattern(), qconfig_A16))
+        super().__init__(quantizers)
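As a rough usage sketch (not part of this diff), the new quantizer can be dropped into a generic PT2E prepare/convert flow. The toy module, example shapes, and the use of export_for_training with torch.ao.quantization.quantize_pt2e are assumptions here; the real Cadence pipeline wraps this in its own compiler entry points.

import torch
from executorch.backends.cadence.aot.quantizer.quantizer import (
    CadenceWithSoftmaxQuantizer,
)
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.export import export_for_training


class AttentionTail(torch.nn.Module):
    # Toy module: just the softmax that SoftmaxPattern targets.
    def forward(self, scores: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.softmax(scores, dim=-1)


model = AttentionTail().eval()
example_inputs = (torch.randn(1, 4, 64),)

# Generic PT2E flow with the new quantizer (sketch only).
gm = export_for_training(model, example_inputs).module()
quantizer = CadenceWithSoftmaxQuantizer()  # default quantizers + A16 softmax
prepared = prepare_pt2e(gm, quantizer)
prepared(*example_inputs)  # calibration pass
converted = convert_pt2e(prepared)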
