20 changes: 20 additions & 0 deletions llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -892,6 +892,26 @@ let Predicates = [HasSVEorSME] in {
def : Pat<(nxv2i64 (splat_vector (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)))),
(DUP_ZI_D $a, $b)>;

// Duplicate Int immediate to active vector elements (zeroing).
def : Pat<(nxv16i8 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), (SVEDup0Undef))),
(CPY_ZPzI_B $pg, $a, $b)>;
def : Pat<(nxv8i16 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), (SVEDup0Undef))),
(CPY_ZPzI_H $pg, $a, $b)>;
def : Pat<(nxv4i32 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), (SVEDup0Undef))),
(CPY_ZPzI_S $pg, $a, $b)>;
def : Pat<(nxv2i64 (AArch64dup_mt PPR:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), (SVEDup0Undef))),
(CPY_ZPzI_D $pg, $a, $b)>;

// Duplicate Int immediate to active vector elements (merging).
def : Pat<(nxv16i8 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), (nxv16i8 ZPR:$z))),
(CPY_ZPmI_B $z, $pg, $a, $b)>;
def : Pat<(nxv8i16 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), (nxv8i16 ZPR:$z))),
(CPY_ZPmI_H $z, $pg, $a, $b)>;
def : Pat<(nxv4i32 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), (nxv4i32 ZPR:$z))),
(CPY_ZPmI_S $z, $pg, $a, $b)>;
def : Pat<(nxv2i64 (AArch64dup_mt PPR:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), (nxv2i64 ZPR:$z))),
(CPY_ZPmI_D $z, $pg, $a, $b)>;

// Duplicate immediate FP into all vector elements.
def : Pat<(nxv2f16 (splat_vector (f16 fpimm:$val))),
(DUP_ZR_H (MOVi32imm (bitcast_fpimm_to_i32 f16:$val)))>;
83 changes: 83 additions & 0 deletions llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
@@ -0,0 +1,83 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

; Zeroing.

define dso_local <vscale x 16 x i8> @mov_z_b(<vscale x 16 x i1> %pg) {
; CHECK-LABEL: mov_z_b:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
; CHECK-NEXT: ret
%r = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 1)
ret <vscale x 16 x i8> %r
}

define dso_local <vscale x 8 x i16> @mov_z_h(<vscale x 8 x i1> %pg) {
; CHECK-LABEL: mov_z_h:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.h, p0/z, #1 // =0x1
; CHECK-NEXT: ret
%r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, i16 1)
ret <vscale x 8 x i16> %r
}

define dso_local <vscale x 4 x i32> @mov_z_s(<vscale x 4 x i1> %pg) {
; CHECK-LABEL: mov_z_s:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1
; CHECK-NEXT: ret
%r = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, i32 1)
ret <vscale x 4 x i32> %r
}

define dso_local <vscale x 2 x i64> @mov_z_d(<vscale x 2 x i1> %pg) {
; CHECK-LABEL: mov_z_d:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.d, p0/z, #1 // =0x1
; CHECK-NEXT: ret
%r = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, i64 1)
ret <vscale x 2 x i64> %r
}

; Merging.

define dso_local <vscale x 16 x i8> @mov_m_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg) {
; CHECK-LABEL: mov_m_b:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.b, p0/m, #1 // =0x1
; CHECK-NEXT: ret
%r = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i8 1)
ret <vscale x 16 x i8> %r
}

define dso_local <vscale x 8 x i16> @mov_m_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: mov_m_h:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.h, p0/m, #1 // =0x1
; CHECK-NEXT: ret
%r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i16 1)
ret <vscale x 8 x i16> %r
}

define dso_local <vscale x 4 x i32> @mov_m_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg) {
; CHECK-LABEL: mov_m_s:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.s, p0/m, #1 // =0x1
; CHECK-NEXT: ret
%r = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 1)
ret <vscale x 4 x i32> %r
}

define dso_local <vscale x 2 x i64> @mov_m_d(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: mov_m_d:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.d, p0/m, #1 // =0x1
; CHECK-NEXT: ret
%r = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg, i64 1)
ret <vscale x 2 x i64> %r
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)
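
For context, a minimal C-level sketch (not part of this patch) of how these predicated immediate moves might be reached from source. It assumes the ACLE `svdup_n_*_z`/`svdup_n_*_m` intrinsics from `<arm_sve.h>` lower to the `llvm.aarch64.sve.dup.*` calls exercised in the test above; with the new patterns, both forms should select the immediate `mov` directly instead of materialising the constant in a scalar register first.

```c
// Hypothetical usage sketch; assumes svdup_n_s32_z / svdup_n_s32_m lower to
// llvm.aarch64.sve.dup.nxv4i32 with a zeroinitializer / merge operand.
#include <arm_sve.h>

// Expected (with the new zeroing pattern) to select CPY_ZPzI_S:
//   mov z0.s, p0/z, #1
svint32_t dup_one_zeroing(svbool_t pg) {
  return svdup_n_s32_z(pg, 1);
}

// Expected (with the new merging pattern) to select CPY_ZPmI_S:
//   mov z0.s, p0/m, #1
svint32_t dup_one_merging(svint32_t inactive, svbool_t pg) {
  return svdup_n_s32_m(inactive, pg, 1);
}
```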