Commit e473c84

Merge branch 'main' into nil-is-all-patch-1
2 parents 26b65b3 + 335de46


41 files changed: +1283 −181 lines

backends/cadence/aot/functions.yaml

Lines changed: 15 additions & 0 deletions
@@ -249,6 +249,21 @@
     - arg_meta: null
       kernel_name: impl::reference::quantized_relu_asym8u_asym8u_per_tensor_out
 
+- func: cadence::quantized_add.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!)
+  kernels:
+    - arg_meta: null
+      kernel_name: impl::reference::quantized_add_per_tensor_out
+
+- func: cadence::quantized_add_asym8sxasym8s_asym8s.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!)
+  kernels:
+    - arg_meta: null
+      kernel_name: impl::reference::quantized_add_asym8sxasym8s_asym8s_per_tensor_out
+
+- func: cadence::quantized_add_asym8uxasym8u_asym8u.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!)
+  kernels:
+    - arg_meta: null
+      kernel_name: impl::reference::quantized_add_asym8uxasym8u_asym8u_per_tensor_out
+
 - func: cadence::quantized_matmul.out(Tensor X, int X_zero_point, Tensor Y, int Y_zero_point, Tensor? bias, int out_multiplier, int out_shift, int out_zero_point, bool transposed, *, Tensor(a!) out) -> Tensor(a!)
   kernels:
     - arg_meta: null
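Note: the asym8sxasym8s_asym8s / asym8uxasym8u_asym8u suffixes spell out the dtype contract: asymmetric signed (s) or unsigned (u) 8-bit inputs producing an output of the same kind. As a rough sketch of what these per-tensor kernels compute, assuming standard affine quantization semantics (q = round(x / scale) + zero_point) — illustrative only, not the reference kernel's actual code:

import torch

def quantized_add_per_tensor(
    X: torch.Tensor, X_scale: float, X_zero_point: int,
    Y: torch.Tensor, Y_scale: float, Y_zero_point: int,
    out_scale: float, out_zero_point: int,
) -> torch.Tensor:
    # Dequantize both inputs, add in float, then requantize to the output params.
    x_fp = (X.to(torch.float32) - X_zero_point) * X_scale
    y_fp = (Y.to(torch.float32) - Y_zero_point) * Y_scale
    q = torch.round((x_fp + y_fp) / out_scale) + out_zero_point
    info = torch.iinfo(X.dtype)  # clamp to the int8/uint8 representable range
    return q.clamp(info.min, info.max).to(X.dtype)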

backends/cadence/aot/functions_hifi.yaml

Lines changed: 10 additions & 0 deletions
@@ -404,6 +404,16 @@
     - arg_meta: null
       kernel_name: cadence::impl::HiFi::quantized_relu_asym8u_asym8u_per_tensor_out
 
+- func: cadence::quantized_add_asym8sxasym8s_asym8s.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!)
+  kernels:
+    - arg_meta: null
+      kernel_name: cadence::impl::HiFi::quantized_add_asym8sxasym8s_asym8s_per_tensor_out
+
+- func: cadence::quantized_add_asym8uxasym8u_asym8u.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!)
+  kernels:
+    - arg_meta: null
+      kernel_name: cadence::impl::HiFi::quantized_add_asym8uxasym8u_asym8u_per_tensor_out
+
 - func: cadence::quantized_matmul.out(Tensor X, int X_zero_point, Tensor Y, int Y_zero_point, Tensor? bias, int out_multiplier, int out_shift, int out_zero_point, bool transposed, *, Tensor(a!) out) -> Tensor(a!)
   kernels:
     - arg_meta: null
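Note that, unlike the reference list above, the HiFi list registers only the two type-specialized variants; the generic cadence::quantized_add.per_tensor_out entry is added only to functions.yaml in this commit.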

backends/cadence/aot/memory_planning.py

Lines changed: 8 additions & 5 deletions
@@ -116,6 +116,9 @@ def plan_spec(
         Greedily place the spec in the first memory that can fit it.
         """
         for spec.mem_id in range(1, self.get_num_memories()):
+            if placement_constraints.is_mem_id_in_blocklist(spec, spec.mem_id):
+                # Skip placement for blocked memory id.
+                continue
             prev_offset, smallest_gap = 0, float("inf")
             for allocated_spec in state.allocated_buffers[spec.mem_id]:
                 if not Verifier.lifetime_overlap(spec, allocated_spec):
@@ -141,11 +144,11 @@ def plan_spec(
             )
             if spec.mem_offset is None:
                 spec.mem_offset = prev_offset
-                if not self.is_valid_placement(spec, placement_constraints):
-                    spec.mem_offset = None
-                    continue
-            else:
-                spec.mem_offset = prev_offset
+
+            if not self.is_valid_placement(spec, placement_constraints):
+                # Skip placement for invalid memory id.
+                spec.mem_offset = None
+                continue
 
             state.place_spec(spec)
             # A data structure used for maintaining the tensor order
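Taken together, the two hunks make plan_spec apply the same guards to every candidate memory: blocked memory ids are skipped before any gap search, and every tentative offset — whether from a gap fit or from the end of the buffer — must now pass is_valid_placement. A condensed sketch of the resulting control flow, with hypothetical stand-ins for the pass's helpers and state:

def plan_spec_sketch(spec, num_memories, is_blocked, find_gap_offset,
                     end_offset, is_valid_placement, place):
    # Greedy first-fit, mirroring the range(1, num_memories) loop above.
    for mem_id in range(1, num_memories):
        if is_blocked(spec, mem_id):
            continue  # skip placement for blocked memory id
        spec.mem_id = mem_id
        gap = find_gap_offset(spec)  # smallest gap with no lifetime overlap
        spec.mem_offset = gap if gap is not None else end_offset(mem_id)
        if not is_valid_placement(spec):
            spec.mem_offset = None  # invalid here; try the next memory id
            continue
        place(spec)
        return True
    return False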

backends/cadence/aot/memory_planning_algo.py

Lines changed: 2 additions & 2 deletions
@@ -204,7 +204,7 @@ def _place_memory_id_pinned_specs(
             for spec, c in spec_with_abs_constraint.items()
             if c is not None and c.pinned_memory_id == mem_id and c.offset is None
         }
-        logging.error(f"Placing specs {mem_id_pinned_specs} for {mem_id=}")
+        logging.debug(f"Placing specs {mem_id_pinned_specs} for {mem_id=}")
 
         with self.block_memories_except(mem_id):
             self.plan(
@@ -220,7 +220,7 @@ def _place_memory_id_pinned_specs(
             if constraint is None:
                 continue
 
-            logging.error(f"Placing spec {spec} with {constraint}")
+            logging.debug(f"Placing spec {spec} with {constraint}")
 
             if not state.is_placed(spec):
                 raise MemoryError(
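Demoting these logs from error to debug is a verbosity fix: the messages fire for every pinned spec during normal planning, while real failures are still surfaced by the MemoryError raised just below.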

backends/cadence/aot/ops_registrations.py

Lines changed: 46 additions & 0 deletions
@@ -325,6 +325,22 @@
     "quantized_add.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, "
     "int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!)"
 )
+lib.define(
+    "quantized_add_asym8sxasym8s_asym8s.per_tensor(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, "
+    "int Y_zero_point, float out_scale, int out_zero_point) -> Tensor"
+)
+lib.define(
+    "quantized_add_asym8sxasym8s_asym8s.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, "
+    "int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!)"
+)
+lib.define(
+    "quantized_add_asym8uxasym8u_asym8u.per_tensor(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, "
+    "int Y_zero_point, float out_scale, int out_zero_point) -> Tensor"
+)
+lib.define(
+    "quantized_add_asym8uxasym8u_asym8u.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, "
+    "int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!)"
+)
 lib.define(
     "quantized_mul.out(Tensor X, Tensor X_scale, Tensor X_zero_point, Tensor Y, Tensor Y_scale, "
     "Tensor Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!)"
@@ -503,6 +519,36 @@ def quantized_add_per_tensor_meta(
     return X.new_empty(out_size, dtype=X.dtype)
 
 
+@register_fake("cadence::quantized_add_asym8sxasym8s_asym8s.per_tensor")
+def quantized_add_asym8sxasym8s_asym8s_per_tensor_meta(
+    X: torch.Tensor,
+    X_scale: float,
+    X_zero_point: int,
+    Y: torch.Tensor,
+    Y_scale: float,
+    Y_zero_point: int,
+    out_scale: float,
+    out_zero_point: int,
+) -> torch.Tensor:
+    out_size = torch.broadcast_shapes(X.size(), Y.size())
+    return X.new_empty(out_size, dtype=X.dtype)
+
+
+@register_fake("cadence::quantized_add_asym8uxasym8u_asym8u.per_tensor")
+def quantized_add_asym8uxasym8u_asym8u_per_tensor_meta(
+    X: torch.Tensor,
+    X_scale: float,
+    X_zero_point: int,
+    Y: torch.Tensor,
+    Y_scale: float,
+    Y_zero_point: int,
+    out_scale: float,
+    out_zero_point: int,
+) -> torch.Tensor:
+    out_size = torch.broadcast_shapes(X.size(), Y.size())
+    return X.new_empty(out_size, dtype=X.dtype)
+
+
 @register_fake("cadence::quantized_linear")
 def quantized_linear_meta(
     src: torch.Tensor,
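The new @register_fake meta kernels only describe shape and dtype propagation for the exporter, reusing the same broadcast rule as the generic quantized_add meta function above. A quick self-contained illustration of that rule:

import torch

x = torch.empty(2, 1, dtype=torch.int8)
y = torch.empty(1, 3, dtype=torch.int8)
# Same computation as the meta functions above: broadcast shape, input dtype.
out_size = torch.broadcast_shapes(x.size(), y.size())
assert out_size == torch.Size([2, 3])
assert x.new_empty(out_size, dtype=x.dtype).dtype == torch.int8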

backends/cadence/aot/tests/test_memory_passes.py

Lines changed: 1 addition & 1 deletion
@@ -1044,7 +1044,7 @@ class DummyMemIdBlockConstraintGen(PassBase):
     mul: blocks 1, 3
     """
 
-    def __init__(self, memory_constraints: MemoryConfig):
+    def __init__(self, memory_constraints: MemConstraints):
        self.memory_constraints = memory_constraints
 
    def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
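This is an annotation-only fix; since Python type hints are not enforced at runtime, the dummy pass's behavior is unchanged.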

backends/cadence/aot/tests/test_type_dispatch_passes.py

Lines changed: 50 additions & 0 deletions
@@ -445,3 +445,53 @@ def test_uint8_dispatch_quantized_conv_nhwc_dilated(self) -> None:
             ),
             1,
         )
+
+    def test_int8_dispatch_quantized_add(self) -> None:
+        """Test int8 x int8 inputs should dispatch to asym8sxasym8s_asym8s variant for quantized_add"""
+        x = torch.randint(-128, 127, (2, 3), dtype=torch.int8)
+        y = torch.randint(-128, 127, (2, 3), dtype=torch.int8)
+        gm = single_op_builder(
+            placeholders=(x, y),
+            op=exir_ops.edge.cadence.quantized_add.per_tensor,
+            args=(x, 1.0, 0, y, 1.0, 0, 1.0, 0),
+        )
+        p = CompileTimeTypeDispatchPass()
+        gm = cast(PassResult, p(gm)).graph_module
+        # Original op should be replaced
+        self.assertEqual(
+            count_node(gm, exir_ops.edge.cadence.quantized_add.per_tensor),
+            0,
+        )
+        # Should be replaced with int8 specific variant
+        self.assertEqual(
+            count_node(
+                gm,
+                exir_ops.edge.cadence.quantized_add_asym8sxasym8s_asym8s.per_tensor,
+            ),
+            1,
+        )
+
+    def test_uint8_dispatch_quantized_add(self) -> None:
+        """Test uint8 x uint8 inputs should dispatch to asym8uxasym8u_asym8u variant for quantized_add"""
+        x = torch.randint(0, 255, (2, 3), dtype=torch.uint8)
+        y = torch.randint(0, 255, (2, 3), dtype=torch.uint8)
+        gm = single_op_builder(
+            placeholders=(x, y),
+            op=exir_ops.edge.cadence.quantized_add.per_tensor,
+            args=(x, 1.0, 0, y, 1.0, 0, 1.0, 0),
+        )
+        p = CompileTimeTypeDispatchPass()
+        gm = cast(PassResult, p(gm)).graph_module
+        # Original op should be replaced
+        self.assertEqual(
+            count_node(gm, exir_ops.edge.cadence.quantized_add.per_tensor),
+            0,
+        )
+        # Should be replaced with uint8 specific variant
+        self.assertEqual(
+            count_node(
+                gm,
+                exir_ops.edge.cadence.quantized_add_asym8uxasym8u_asym8u.per_tensor,
+            ),
+            1,
+        )

backends/cadence/aot/type_dispatch.py

Lines changed: 8 additions & 0 deletions
@@ -85,6 +85,14 @@ class CompileTimeTypeDispatchPass(ExportPass):
             (torch.uint8,): "asym8u_asym8u",
         },
     ),
+    exir_ops.edge.cadence.quantized_add.per_tensor: OpConfig(
+        "quantized_add",
+        type_dispatch_suffixes={
+            (torch.int8, torch.int8): "asym8sxasym8s_asym8s",
+            (torch.uint8, torch.uint8): "asym8uxasym8u_asym8u",
+        },
+        weight_arg_idx=3,
+    ),
 }
 
 def call_operator(
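Here weight_arg_idx=3 seemingly reuses the config's weight slot for the second addend: Y sits at argument index 3 in the schema, so both operands' dtypes select the suffix. A hypothetical sketch of the lookup (illustrative names; the real logic lives in call_operator):

import torch

# Mirrors the table entry above; keys are (input dtype, arg-3 dtype) pairs.
SUFFIXES = {
    (torch.int8, torch.int8): "asym8sxasym8s_asym8s",
    (torch.uint8, torch.uint8): "asym8uxasym8u_asym8u",
}

def variant_name(base: str, x_dtype: torch.dtype, w_dtype: torch.dtype) -> str:
    # A KeyError here stands in for whatever fallback the real pass uses.
    return f"{base}_{SUFFIXES[(x_dtype, w_dtype)]}.per_tensor"

assert (variant_name("quantized_add", torch.int8, torch.int8)
        == "quantized_add_asym8sxasym8s_asym8s.per_tensor")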

backends/cadence/fusion_g3/operators/op_clamp.cpp

Lines changed: 3 additions & 2 deletions
@@ -45,6 +45,7 @@ bool is_out_of_bounds(CTYPE_VAL val) {
 }
 
 ET_NODISCARD bool check_bounds(
+    KernelRuntimeContext& ctx,
     const Scalar& val_scalar,
     const ScalarType& val_type,
     const ScalarType& out_type,
@@ -107,14 +108,14 @@ Tensor& clamp_out(
   if (has_min) {
     ET_KERNEL_CHECK(
         ctx,
-        check_bounds(min_opt.value(), min_type, out_type, "minimum"),
+        check_bounds(ctx, min_opt.value(), min_type, out_type, "minimum"),
         InvalidArgument,
         out);
   }
   if (has_max) {
     ET_KERNEL_CHECK(
         ctx,
-        check_bounds(max_opt.value(), max_type, out_type, "maximum"),
+        check_bounds(ctx, max_opt.value(), max_type, out_type, "maximum"),
         InvalidArgument,
         out);
   }
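Threading ctx through check_bounds mirrors the surrounding ET_KERNEL_CHECK call sites, which already take the context as their first argument; presumably this lets the bounds checks report failures through the kernel runtime context rather than failing silently.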
