Commit f4a39a8

Fold SVE mul and mul_u to neg during isel (#160828)
Replace mul and mul_u ops with a neg operation if their second operand is a splat value of -1. Also apply the optimization to mul_u ops whose first operand is a splat value of -1, owing to their commutativity.
1 parent 952b123 commit f4a39a8

2 files changed, +161 -0 lines changed
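For context, a minimal C/ACLE sketch (not part of this commit) of source code whose SVE multiplies should now select to a predicated neg. It assumes the svmul_s32_m, svmul_s32_x and svdup_n_s32 intrinsics from arm_sve.h and that clang lowers them to the llvm.aarch64.sve.mul and llvm.aarch64.sve.mul.u intrinsics exercised by the new test below; the codegen described in the comments mirrors that test's CHECK lines. The function names are illustrative only.

#include <arm_sve.h>

// Merging multiply by splat(-1): inactive lanes keep x, so this corresponds
// to the AArch64mul_m1 patterns and should select to "neg z0.s, p0/m, z0.s".
svint32_t mul_by_minus_one_m(svbool_t pg, svint32_t x) {
  return svmul_s32_m(pg, x, svdup_n_s32(-1));
}

// "Don't care" multiply (mul_u): inactive lanes are undefined, so this
// corresponds to the AArch64mul_p patterns guarded by AddedComplexity = 5
// and should select to the _UNDEF neg variant.
svint32_t mul_by_minus_one_x(svbool_t pg, svint32_t x) {
  return svmul_s32_x(pg, x, svdup_n_s32(-1));
}

// Splat(-1) as the first operand of the merging form: inactive lanes must
// hold -1, so the swapped-operand patterns still materialise the splat
// (mov z1.s, #-1) as the merge source before the predicated neg.
svint32_t mul_by_minus_one_m_swapped(svbool_t pg, svint32_t x) {
  return svmul_s32_m(pg, svdup_n_s32(-1), x);
}

Compiling such code with, for example, clang -O2 -march=armv8-a+sve should produce the same assembly as the CHECK lines in the new test file.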

llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td

Lines changed: 30 additions & 0 deletions
@@ -1010,6 +1010,36 @@ let Predicates = [HasSVE_or_SME] in {
   defm SEL_ZPZZ : sve_int_sel_vvv<"sel", vselect>;
 
   defm SPLICE_ZPZ : sve_int_perm_splice<"splice", AArch64splice>;
+
+  // mul x (splat -1) -> neg x
+  def : Pat<(nxv16i8 (AArch64mul_m1 nxv16i1:$Op1, nxv16i8:$Op2, (nxv16i8 (splat_vector (i32 -1))))),
+            (NEG_ZPmZ_B $Op2, $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (AArch64mul_m1 nxv8i1:$Op1, nxv8i16:$Op2, (nxv8i16 (splat_vector (i32 -1))))),
+            (NEG_ZPmZ_H $Op2, $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (AArch64mul_m1 nxv4i1:$Op1, nxv4i32:$Op2, (nxv4i32 (splat_vector (i32 -1))))),
+            (NEG_ZPmZ_S $Op2, $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (AArch64mul_m1 nxv2i1:$Op1, nxv2i64:$Op2, (nxv2i64 (splat_vector (i64 -1))))),
+            (NEG_ZPmZ_D $Op2, $Op1, $Op2)>;
+
+  let AddedComplexity = 5 in {
+    def : Pat<(nxv16i8 (AArch64mul_p nxv16i1:$Op1, nxv16i8:$Op2, (nxv16i8 (splat_vector (i32 -1))))),
+              (NEG_ZPmZ_B_UNDEF $Op2, $Op1, $Op2)>;
+    def : Pat<(nxv8i16 (AArch64mul_p nxv8i1:$Op1, nxv8i16:$Op2, (nxv8i16 (splat_vector (i32 -1))))),
+              (NEG_ZPmZ_H_UNDEF $Op2, $Op1, $Op2)>;
+    def : Pat<(nxv4i32 (AArch64mul_p nxv4i1:$Op1, nxv4i32:$Op2, (nxv4i32 (splat_vector (i32 -1))))),
+              (NEG_ZPmZ_S_UNDEF $Op2, $Op1, $Op2)>;
+    def : Pat<(nxv2i64 (AArch64mul_p nxv2i1:$Op1, nxv2i64:$Op2, (nxv2i64 (splat_vector (i64 -1))))),
+              (NEG_ZPmZ_D_UNDEF $Op2, $Op1, $Op2)>;
+  }
+
+  def : Pat<(nxv16i8 (AArch64mul_m1 nxv16i1:$Op1, (nxv16i8 (splat_vector (i32 -1))), nxv16i8:$Op2)),
+            (NEG_ZPmZ_B (DUP_ZI_B -1, 0), $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (AArch64mul_m1 nxv8i1:$Op1, (nxv8i16 (splat_vector (i32 -1))), nxv8i16:$Op2)),
+            (NEG_ZPmZ_H (DUP_ZI_H -1, 0), $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (AArch64mul_m1 nxv4i1:$Op1, (nxv4i32 (splat_vector (i32 -1))), nxv4i32:$Op2)),
+            (NEG_ZPmZ_S (DUP_ZI_S -1, 0), $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (AArch64mul_m1 nxv2i1:$Op1, (nxv2i64 (splat_vector (i64 -1))), nxv2i64:$Op2)),
+            (NEG_ZPmZ_D (DUP_ZI_D -1, 0), $Op1, $Op2)>;
 } // End HasSVE_or_SME
 
 // COMPACT - word and doubleword
New test file: 131 additions & 0 deletions
@@ -0,0 +1,131 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mattr=+sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Muls with (-1) as operand should fold to neg.
+define <vscale x 16 x i8> @mul_neg_fold_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: mul_neg_fold_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg z0.b, p0/m, z0.b
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -1))
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 8 x i16> @mul_neg_fold_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_neg_fold_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg z0.h, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -1))
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 4 x i32> @mul_neg_fold_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_neg_fold_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg z0.s, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -1))
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 2 x i64> @mul_neg_fold_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_neg_fold_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -1))
+  ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 16 x i8> @mul_neg_fold_u_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: mul_neg_fold_u_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg z0.b, p0/m, z0.b
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -1))
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 8 x i16> @mul_neg_fold_u_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_neg_fold_u_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg z0.h, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -1))
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 4 x i32> @mul_neg_fold_u_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_neg_fold_u_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg z0.s, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -1))
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 2 x i64> @mul_neg_fold_u_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_neg_fold_u_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -1))
+  ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 16 x i8> @mul_neg_fold_different_argument_order_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: mul_neg_fold_different_argument_order_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z1.b, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    neg z1.b, p0/m, z0.b
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 -1), <vscale x 16 x i8> %a)
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 8 x i16> @mul_neg_fold_different_argument_order_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_neg_fold_different_argument_order_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z1.h, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    neg z1.h, p0/m, z0.h
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 -1), <vscale x 8 x i16> %a)
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 4 x i32> @mul_neg_fold_different_argument_order_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_neg_fold_different_argument_order_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z1.s, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    neg z1.s, p0/m, z0.s
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 -1), <vscale x 4 x i32> %a)
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 2 x i64> @mul_neg_fold_different_argument_order_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_neg_fold_different_argument_order_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z1.d, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    neg z1.d, p0/m, z0.d
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> splat(i64 -1), <vscale x 2 x i64> %a)
+  ret <vscale x 2 x i64> %1
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
