diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e61dedb247756..8ab5cb2902110 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2569,6 +2569,19 @@ MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
     unsigned *Fast) const {
+
+  // Allow SVE loads/stores where the alignment is >= the element type size,
+  // even with +strict-align. Predicated SVE loads/stores (e.g. ld1/st1), used
+  // for loads/stores that come from IR, only require element-size alignment
+  // (even if unaligned accesses are disabled). Without this, these will be
+  // forced to have 16-byte alignment with +strict-align (and fail to lower as
+  // we don't yet support TLI.expandUnalignedLoad() and
+  // TLI.expandUnalignedStore()).
+  if (VT.isScalableVector()) {
+    unsigned ElementSizeBits = VT.getScalarSizeInBits();
+    if (ElementSizeBits % 8 == 0 && Alignment >= Align(ElementSizeBits / 8))
+      return true;
+  }
+
   if (Subtarget->requiresStrictAlign())
     return false;
 
diff --git a/llvm/test/CodeGen/AArch64/sve-load-store-strict-align.ll b/llvm/test/CodeGen/AArch64/sve-load-store-strict-align.ll
new file mode 100644
index 0000000000000..c5b0651ab01d4
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-load-store-strict-align.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+strict-align < %s | FileCheck %s
+
+define void @nxv16i8(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    ret
+  %l3 = load <vscale x 16 x i8>, ptr %ldptr, align 1
+  store <vscale x 16 x i8> %l3, ptr %stptr, align 1
+  ret void
+}
+
+define void @nxv8i16(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    ret
+  %l3 = load <vscale x 8 x i16>, ptr %ldptr, align 2
+  store <vscale x 8 x i16> %l3, ptr %stptr, align 2
+  ret void
+}
+
+define void @nxv4i32(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
+  %l3 = load <vscale x 4 x i32>, ptr %ldptr, align 4
+  store <vscale x 4 x i32> %l3, ptr %stptr, align 4
+  ret void
+}
+
+define void @nxv2i64(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
+  %l3 = load <vscale x 2 x i64>, ptr %ldptr, align 8
+  store <vscale x 2 x i64> %l3, ptr %stptr, align 8
+  ret void
+}
+
+define void @nxv16i1(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr p0, [x0]
+; CHECK-NEXT:    str p0, [x1]
+; CHECK-NEXT:    ret
+  %l3 = load <vscale x 16 x i1>, ptr %ldptr, align 2
+  store <vscale x 16 x i1> %l3, ptr %stptr, align 2
+  ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-unaligned-load-store-strict-align.ll b/llvm/test/CodeGen/AArch64/sve-unaligned-load-store-strict-align.ll
new file mode 100644
index 0000000000000..27637800f751f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-unaligned-load-store-strict-align.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; RUN: not --crash llc -mtriple=aarch64-linux-gnu -mattr=+sve,+strict-align < %s 2>&1 | FileCheck %s --check-prefix=CHECK-FIXME
+
+; REQUIRES: asserts
+
+; FIXME: Support TLI.expandUnalignedLoad()/TLI.expandUnalignedStore() for SVE.
+; CHECK-FIXME: LLVM ERROR: Invalid size request on a scalable vector.
+
+define void @unaligned_nxv16i1(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: unaligned_nxv16i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr p0, [x0]
+; CHECK-NEXT:    str p0, [x1]
+; CHECK-NEXT:    ret
+  %l3 = load <vscale x 16 x i1>, ptr %ldptr, align 1
+  store <vscale x 16 x i1> %l3, ptr %stptr, align 1
+  ret void
+}
+
+define void @unaligned_nxv2i64(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: unaligned_nxv2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
+  %l3 = load <vscale x 2 x i64>, ptr %ldptr, align 4
+  store <vscale x 2 x i64> %l3, ptr %stptr, align 4
+  ret void
+}
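
For review purposes, the new check in `allowsMisalignedMemoryAccesses()` reduces to a small predicate on the element size and the access alignment. Below is a minimal standalone C++ sketch of that logic (the helper name and the `main()` driver are illustrative, not part of the patch), walking through the element-size/alignment pairs exercised by the tests above:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative restatement of the check added to
// AArch64TargetLowering::allowsMisalignedMemoryAccesses(): under
// +strict-align, a scalable-vector access is still allowed when the element
// size is a whole number of bytes and the alignment covers one element.
static bool sveAccessAllowedUnderStrictAlign(uint64_t ElementSizeBits,
                                             uint64_t AlignmentBytes) {
  return ElementSizeBits % 8 == 0 && AlignmentBytes >= ElementSizeBits / 8;
}

int main() {
  assert(sveAccessAllowedUnderStrictAlign(8, 1));  // @nxv16i8, align 1
  assert(sveAccessAllowedUnderStrictAlign(16, 2)); // @nxv8i16, align 2
  assert(sveAccessAllowedUnderStrictAlign(32, 4)); // @nxv4i32, align 4
  assert(sveAccessAllowedUnderStrictAlign(64, 8)); // @nxv2i64, align 8
  // @unaligned_nxv2i64 (align 4 < 8-byte elements): not covered, so
  // +strict-align still rejects it and lowering currently crashes
  // (see the FIXME test above).
  assert(!sveAccessAllowedUnderStrictAlign(64, 4));
  // i1 elements are sub-byte, so the check never fires for predicates;
  // @nxv16i1 instead relies on the natural 2-byte alignment of the
  // LDR/STR predicate-register forms.
  assert(!sveAccessAllowedUnderStrictAlign(1, 2));
  return 0;
}
```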