diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f2bc1765bc4c6..d1c8de754b8b3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -23487,7 +23487,10 @@ EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op,
   // combining will typically form larger LMUL operations from the LMUL1
   // operations emitted here, and that's okay because combining isn't
   // introducing new memory operations; it's just merging existing ones.
-  const unsigned MinVLenInBytes = Subtarget.getRealMinVLen()/8;
+  // NOTE: We limit to 1024 bytes to avoid creating an invalid MVT.
+  const unsigned MinVLenInBytes =
+      std::min(Subtarget.getRealMinVLen() / 8, 1024U);
+
   if (Op.size() < MinVLenInBytes)
     // TODO: Figure out short memops. For the moment, do the default thing
     // which ends up using scalar sequences.
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr139075.ll b/llvm/test/CodeGen/RISCV/rvv/pr139075.ll
new file mode 100644
index 0000000000000..33e8e13a21588
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/pr139075.ll
@@ -0,0 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvl16384b | FileCheck %s
+
+define void @a(ptr %0, ptr %1) {
+; CHECK-LABEL: a:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    addi a1, a1, 1024
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    addi a0, a0, 1024
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %0, ptr align 4 %1, i64 2048, i1 false)
+  ret void
+}