18 changes: 18 additions & 0 deletions llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -730,6 +730,19 @@ let Predicates = [HasSVE_or_SME] in {
defm ABS_ZPmZ : sve_int_un_pred_arit< 0b110, "abs", AArch64abs_mt>;
defm NEG_ZPmZ : sve_int_un_pred_arit< 0b111, "neg", AArch64neg_mt>;

// mul x (splat -1) -> neg x
def : SVE_2_Op_Neg_One_Passthru_Pat<nxv16i8, AArch64mul_m1, nxv16i1, NEG_ZPmZ_B , i32>;
def : SVE_2_Op_Neg_One_Passthru_Pat<nxv8i16, AArch64mul_m1, nxv8i1, NEG_ZPmZ_H , i32>;
def : SVE_2_Op_Neg_One_Passthru_Pat<nxv4i32, AArch64mul_m1, nxv4i1, NEG_ZPmZ_S , i32>;
def : SVE_2_Op_Neg_One_Passthru_Pat<nxv2i64, AArch64mul_m1, nxv2i1, NEG_ZPmZ_D , i64>;

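// For the undef-passthru form (mul_p) any passthru is acceptable, so select the
// _UNDEF pseudos; the added complexity prefers the fold over the plain MUL patterns.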
let AddedComplexity = 5 in {
def : SVE_2_Op_Neg_One_Passthru_Pat<nxv16i8, AArch64mul_p, nxv16i1, NEG_ZPmZ_B_UNDEF, i32>;
def : SVE_2_Op_Neg_One_Passthru_Pat<nxv8i16, AArch64mul_p, nxv8i1, NEG_ZPmZ_H_UNDEF, i32>;
def : SVE_2_Op_Neg_One_Passthru_Pat<nxv4i32, AArch64mul_p, nxv4i1, NEG_ZPmZ_S_UNDEF, i32>;
def : SVE_2_Op_Neg_One_Passthru_Pat<nxv2i64, AArch64mul_p, nxv2i1, NEG_ZPmZ_D_UNDEF, i64>;
}

defm CLS_ZPmZ : sve_int_un_pred_arit_bitwise< 0b000, "cls", AArch64cls_mt>;
defm CLZ_ZPmZ : sve_int_un_pred_arit_bitwise< 0b001, "clz", AArch64clz_mt>;
defm CNT_ZPmZ : sve_int_un_pred_arit_bitwise< 0b010, "cnt", AArch64cnt_mt>;
@@ -1012,6 +1025,11 @@ let Predicates = [HasSVE_or_SME] in {
defm SEL_ZPZZ : sve_int_sel_vvv<"sel", vselect>;

defm SPLICE_ZPZ : sve_int_perm_splice<"splice", AArch64splice>;

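// mul (splat -1) x -> neg x. The splat is the merging operand here, so materialize
// it with DUP as the passthru to keep the inactive lanes at -1.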
def : SVE_2_Op_Neg_One_Replace_Pat<nxv16i8, AArch64mul_m1, nxv16i1, NEG_ZPmZ_B , DUP_ZI_B, i32>;
def : SVE_2_Op_Neg_One_Replace_Pat<nxv8i16, AArch64mul_m1, nxv8i1, NEG_ZPmZ_H , DUP_ZI_H, i32>;
def : SVE_2_Op_Neg_One_Replace_Pat<nxv4i32, AArch64mul_m1, nxv4i1, NEG_ZPmZ_S , DUP_ZI_S, i32>;
def : SVE_2_Op_Neg_One_Replace_Pat<nxv2i64, AArch64mul_m1, nxv2i1, NEG_ZPmZ_D , DUP_ZI_D, i64>;
} // End HasSVE_or_SME

// COMPACT - word and doubleword
10 changes: 10 additions & 0 deletions llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -723,6 +723,16 @@ class SVE2p1_Cvt_VG2_Pat<string name, SDPatternOperator intrinsic, ValueType out
: Pat<(out_vt (intrinsic in_vt:$Zn1, in_vt:$Zn2)),
(!cast<Instruction>(name) (REG_SEQUENCE ZPR2Mul2, in_vt:$Zn1, zsub0, in_vt:$Zn2, zsub1))>;

class SVE_2_Op_Neg_One_Replace_Pat<ValueType vt, SDPatternOperator op, ValueType pt,
                                   Instruction inst, Instruction dup_inst, ValueType immT>
  : Pat<(vt (op pt:$Op1, (vt (splat_vector (immT -1))), vt:$Op2)),
        (inst (dup_inst -1, 0), $Op1, $Op2)>;

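// Fold a predicated multiply by a splat of -1 into a predicated NEG, reusing the
// other multiplicand as the passthru so the inactive lanes keep its value.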
class SVE_2_Op_Neg_One_Passthru_Pat<ValueType vt, SDPatternOperator op, ValueType pt,
                                    Instruction inst, ValueType immT>
  : Pat<(vt (op pt:$Op1, vt:$Op2, (vt (splat_vector (immT -1))))),
        (inst $Op2, $Op1, $Op2)>;

//===----------------------------------------------------------------------===//
// SVE pattern match helpers.
//===----------------------------------------------------------------------===//
148 changes: 148 additions & 0 deletions llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll
@@ -0,0 +1,148 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -mattr=+sve < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Multiplies with a splat of -1 as one operand should fold to neg.
define <vscale x 16 x i8> @mul_neg_fold_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: mul_neg_fold_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: neg z0.b, p0/m, z0.b
; CHECK-NEXT: ret
%1 = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 -1)
%2 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %1)
ret <vscale x 16 x i8> %2
}

define <vscale x 8 x i16> @mul_neg_fold_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: mul_neg_fold_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: neg z0.h, p0/m, z0.h
; CHECK-NEXT: ret
%1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 -1)
%2 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %1)
ret <vscale x 8 x i16> %2
}

define <vscale x 4 x i32> @mul_neg_fold_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: mul_neg_fold_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: neg z0.s, p0/m, z0.s
; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 -1)
%2 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %1)
ret <vscale x 4 x i32> %2
}

define <vscale x 2 x i64> @mul_neg_fold_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: mul_neg_fold_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: neg z0.d, p0/m, z0.d
; CHECK-NEXT: ret
%1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 -1)
%2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %1)
ret <vscale x 2 x i64> %2
}

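; The mul.u intrinsics leave inactive lanes undefined, so the same fold applies with
; no passthru constraint.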
define <vscale x 16 x i8> @mul_neg_fold_u_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: mul_neg_fold_u_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: neg z0.b, p0/m, z0.b
; CHECK-NEXT: ret
%1 = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 -1)
%2 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %1)
ret <vscale x 16 x i8> %2
}

define <vscale x 8 x i16> @mul_neg_fold_u_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: mul_neg_fold_u_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: neg z0.h, p0/m, z0.h
; CHECK-NEXT: ret
%1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 -1)
%2 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %1)
ret <vscale x 8 x i16> %2
}

define <vscale x 4 x i32> @mul_neg_fold_u_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: mul_neg_fold_u_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: neg z0.s, p0/m, z0.s
; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 -1)
%2 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %1)
ret <vscale x 4 x i32> %2
}

define <vscale x 2 x i64> @mul_neg_fold_u_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: mul_neg_fold_u_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: neg z0.d, p0/m, z0.d
; CHECK-NEXT: ret
%1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 -1)
%2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %1)
ret <vscale x 2 x i64> %2
}

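; When the splat of -1 is the first (merging) operand, the inactive lanes must still
; read -1, so a dup of -1 is materialized as the neg's passthru.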
define <vscale x 16 x i8> @mul_neg_fold_different_argument_order_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: mul_neg_fold_different_argument_order_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z1.b, #-1 // =0xffffffffffffffff
; CHECK-NEXT: neg z1.b, p0/m, z0.b
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%1 = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 -1)
%2 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %1, <vscale x 16 x i8> %a)
ret <vscale x 16 x i8> %2
}

define <vscale x 8 x i16> @mul_neg_fold_different_argument_order_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: mul_neg_fold_different_argument_order_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z1.h, #-1 // =0xffffffffffffffff
; CHECK-NEXT: neg z1.h, p0/m, z0.h
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 -1)
%2 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %1, <vscale x 8 x i16> %a)
ret <vscale x 8 x i16> %2
}

define <vscale x 4 x i32> @mul_neg_fold_different_argument_order_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: mul_neg_fold_different_argument_order_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z1.s, #-1 // =0xffffffffffffffff
; CHECK-NEXT: neg z1.s, p0/m, z0.s
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 -1)
%2 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a)
ret <vscale x 4 x i32> %2
}

define <vscale x 2 x i64> @mul_neg_fold_different_argument_order_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: mul_neg_fold_different_argument_order_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z1.d, #-1 // =0xffffffffffffffff
; CHECK-NEXT: neg z1.d, p0/m, z0.d
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 -1)
%2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a)
ret <vscale x 2 x i64> %2
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64)

declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)